diff --git "a/analysis-state/hybrid-review-cache.jsonl" "b/analysis-state/hybrid-review-cache.jsonl" new file mode 100644--- /dev/null +++ "b/analysis-state/hybrid-review-cache.jsonl" @@ -0,0 +1,833 @@ +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7102, "estimated_input_tokens": 3423, "item_count": 17, "node_count": 17, "serialized_chars": 13691, "soft_pair_count": 12}, "cached_at": "2026-04-14T12:29:59Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "06336f4e5ef35137ea9077d0e4022b88e209daea08c0f45966bc1ddd21dec50e", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:24643", "issue:30277", "issue:34928", "issue:36331", "issue:39290", "issue:43039", "issue:43425", "issue:43596", "issue:43638", "issue:43828", "issue:43975", "issue:44292", "issue:44457", "issue:44805", "issue:45081", "issue:45161", "issue:45237"], "result": {"analyst_result": {"best_issue_reason": "#34928 is the most suitable issue to anchor the cluster because it is the most substantive and well-developed report, even though it is not a duplicate of the others.", "best_pr_reason": null, "canonical_issue_reason": "#34928 has the strongest signal as a canonical representative: detailed reproduction context, the highest discussion activity, and a concrete training-time failure mode.", "canonical_pr_reason": null, "confidence": 0.18, "soft_edge_verdicts": [{"accept": false, "left": "issue:43975", "reason": "Different bugs: one is a detokenization regression for DeepSeek Coder v5, the other is a LoRA merge/save/reload output mismatch.", "right": "issue:44457"}, {"accept": false, "left": "issue:43039", "reason": "Unrelated failure modes: Liger Kernel cross-entropy path vs GPT-OSS MoE tensor-parallel behavior.", "right": "issue:45161"}, 
{"accept": false, "left": "issue:44292", "reason": "Different models and symptoms: Qwen-3-8B-NVFP4 runtime error versus LoRA weight merge/reload inconsistency.", "right": "issue:44457"}, {"accept": false, "left": "issue:34928", "reason": "Both are shape/indexing errors, but the affected stack and repros differ: FSDP + activation checkpointing vs mask/tensor indexing mismatch.", "right": "issue:44805"}, {"accept": false, "left": "issue:24643", "reason": "Both involve DeepSpeed, but one is a 2-D weight requirement error and the other is a collective mismatch on ranks; not the same underlying bug.", "right": "issue:30277"}, {"accept": false, "left": "issue:43975", "reason": "Different problems and hardware contexts: detokenization regression versus AMD GPU inference/runtime failure for GPT-OSS-20B.", "right": "issue:45237"}, {"accept": false, "left": "issue:43638", "reason": "Different root causes: BertModel Zero3 init index error versus GPT-OSS MoE tensor-parallel malfunction.", "right": "issue:45161"}, {"accept": false, "left": "issue:34928", "reason": "One is an FSDP/activation-checkpointing tensor-size mismatch; the other is a trainer API signature break around compute_loss().", "right": "issue:36331"}, {"accept": false, "left": "issue:43596", "reason": "Both mention model-parallel setup, but the concrete errors are unrelated: BertModel Zero3 init indexing versus GPT-OSS MoE TP not working.", "right": "issue:45161"}, {"accept": false, "left": "issue:39290", "reason": "Different model/tokenizer compatibility issues: Gemma3 config missing a field for vLLM versus a Mistral regex patch crashing on tokenizer internals.", "right": "issue:45081"}, {"accept": false, "left": "issue:43828", "reason": "Different symptoms and code paths: autocast dtype mismatch for Phi-tiny-MoE versus GPT-OSS MoE tensor-parallel failure.", "right": "issue:45161"}, {"accept": false, "left": "issue:43425", "reason": "A broad Torch-version incompatibility report is not the same as a specific 
Qwen-3-8B-NVFP4 runtime error.", "right": "issue:44292"}], "summary": "No soft pair looks like a true duplicate; the titles share only broad ecosystem terms (DeepSpeed, model names, training errors) but describe different concrete failure modes. I\u2019d use issue #34928 as the best representative because it is the most detailed and highest-discussion bug report in the set."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43975|issue:44457", "issue:43039|issue:45161", "issue:44292|issue:44457", "issue:34928|issue:44805", "issue:24643|issue:30277", "issue:43975|issue:45237", "issue:43638|issue:45161", "issue:34928|issue:36331", "issue:43596|issue:45161", "issue:39290|issue:45081", "issue:43828|issue:45161", "issue:43425|issue:44292"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7122, "estimated_input_tokens": 3433, "item_count": 18, "node_count": 18, "serialized_chars": 13732, "soft_pair_count": 11}, "cached_at": "2026-04-14T12:31:01Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "a5ead5d50cc925c8837d944b244afd97b349a8eb02df5fba6b03255f5c1770ea", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:39290", "issue:41093", "issue:41720", "issue:41762", "issue:43039", "issue:43404", "issue:43425", "issue:43531", "issue:43596", "issue:43638", "issue:43792", "issue:43854", "issue:43866", "issue:43901", "issue:44512", "issue:44560", "issue:44863", "issue:45070"], "result": {"analyst_result": {"best_issue_reason": "issue:43596 is the cleanest representative of the closest subgroup (Deepspeed ZeRO-3 / index-out-of-bounds failures), but it still does not subsume the rest of the 
cluster.", "best_pr_reason": null, "canonical_issue_reason": "No single canonical issue fits the cluster well; the items are mostly unrelated and only a small ZeRO-3 IndexError subgroup is remotely similar.", "canonical_pr_reason": null, "confidence": 0.89, "soft_edge_verdicts": [{"accept": false, "left": "issue:41762", "reason": "Both mention ZeRO-3 and index-out-of-bounds, but the models and failure points differ (Gemma3 loading vs BertModel init), so this is not the same bug.", "right": "issue:43596"}, {"accept": false, "left": "issue:43901", "reason": "Both are documentation issues, but they target different docs and different behavior changes.", "right": "issue:44512"}, {"accept": false, "left": "issue:39290", "reason": "Different model/runtime problems: Gemma3 config attribute missing in vLLM vs Qwen3 auto device mapping cuda assert.", "right": "issue:41720"}, {"accept": false, "left": "issue:43039", "reason": "Completely different failures: Liger Kernel cross_entropy behavior vs Qwen3-vl-embedding video StopIteration.", "right": "issue:44560"}, {"accept": false, "left": "issue:43866", "reason": "Both are model loading problems, but the reported causes and models are unrelated (corrupted checkpoint vs NemotronH checkpoint loading).", "right": "issue:44863"}, {"accept": false, "left": "issue:43854", "reason": "Different models and different failure modes; one is a unit-test load issue, the other is a corrupted checkpoint report.", "right": "issue:43866"}, {"accept": false, "left": "issue:43425", "reason": "Torch version incompatibility and a pydantic PretrainedConfig regression are distinct incompatibility classes.", "right": "issue:45070"}, {"accept": false, "left": "issue:39290", "reason": "Both involve sliding_window/config handling, but they affect different models and different concrete errors.", "right": "issue:43531"}, {"accept": false, "left": "issue:41093", "reason": "Both are IndexError reports, but the symptoms and code paths differ (mask shape 
mismatch vs ZeRO-3/BertModel empty-index failure).", "right": "issue:43638"}, {"accept": false, "left": "issue:43792", "reason": "Unrelated issues: Whisper model runtime failure vs a v5.4.0 PretrainedConfig/pydantic regression.", "right": "issue:45070"}, {"accept": false, "left": "issue:43404", "reason": "Different model-specific loading/tie-weight issue vs corrupted checkpoint issue; not the same underlying bug.", "right": "issue:43866"}], "summary": "This cluster is heterogeneous: it mixes unrelated model-loading bugs, environment incompatibilities, docs issues, and a few similar-but-not-identical Deepspeed/IndexError reports. I would not merge any of the soft pairs as true duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:41762|issue:43596", "issue:43901|issue:44512", "issue:39290|issue:41720", "issue:43039|issue:44560", "issue:43866|issue:44863", "issue:43854|issue:43866", "issue:43425|issue:45070", "issue:39290|issue:43531", "issue:41093|issue:43638", "issue:43792|issue:45070", "issue:43404|issue:43866"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7118, "estimated_input_tokens": 3431, "item_count": 18, "node_count": 18, "serialized_chars": 13723, "soft_pair_count": 11}, "cached_at": "2026-04-14T12:31:18Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "fe9d5ec5a6454febabc03808c56bf1acdf924834c95881bbc5f0077b4b1cdaea", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:36010", "issue:39290", "issue:41093", "issue:41762", "issue:43296", "issue:43366", "issue:43531", "issue:43541", "issue:43572", "issue:43828", "issue:44291", "issue:44387", "issue:44589", 
"issue:44841", "issue:45005", "issue:45070", "issue:45084", "issue:45161"], "result": {"analyst_result": {"best_issue_reason": "issue:44291 is the strongest standalone issue for triage because it is precise, reproducible, and likely maps cleanly to a single code-path fix.", "best_pr_reason": null, "canonical_issue_reason": "issue:44291 is the best representative bug report in this set: it is concrete, version-scoped, and describes a specific failure mode with a clear trigger (`init_empty_weights` on transformers >= 5.0.0rc0).", "canonical_pr_reason": null, "confidence": 0.88, "soft_edge_verdicts": [{"accept": false, "left": "issue:43541", "reason": "Both involve model/runtime errors, but one is a grouped_mm tracing failure on Mixtral and the other is an autocast dtype mismatch on Phi-tiny-MoE; different models, triggers, and code paths.", "right": "issue:43828"}, {"accept": false, "left": "issue:36010", "reason": "An import error for `GenerationMixin` is unrelated to a Gemma3 config attribute missing in vLLM; no shared concrete failure mechanism.", "right": "issue:39290"}, {"accept": false, "left": "issue:44841", "reason": "Processor failure for Voxtral is a model-specific preprocessing bug, while 'Can't compile non template nodes' is a compiler/template issue; these do not look like the same defect.", "right": "issue:45084"}, {"accept": false, "left": "issue:41093", "reason": "Both are IndexErrors, but one is a mask/tensor shape mismatch during indexing and the other is a zero-sized dimension when loading under ZeRO-3; different underlying causes.", "right": "issue:41762"}, {"accept": false, "left": "issue:43296", "reason": "PaddleOCR-VL loading in vLLM and GGUF gpt-oss support are different model-compatibility requests/issues, not one bug.", "right": "issue:43366"}, {"accept": false, "left": "issue:43572", "reason": "Missing `pad_token_idx` in StableLmConfig and a pydantic `PretrainedConfig` field breakage are separate config-schema regressions affecting 
different APIs.", "right": "issue:45070"}, {"accept": false, "left": "issue:44387", "reason": "CUDA reserved-memory OOM under int4 quantization and tied-weights issues in translation models are both v5 regressions, but they are distinct problems in different subsystems.", "right": "issue:45005"}, {"accept": false, "left": "issue:43541", "reason": "A float32 grouped_mm tracing error on Mixtral is unrelated to tensor-parallelism not working for GPT-OSS MoE; different model families and failure modes.", "right": "issue:45161"}, {"accept": false, "left": "issue:44291", "reason": "Unexpected `_is_hf_initialized` argument during `init_empty_weights` loading is unrelated to a pydantic `PretrainedConfig` field regression.", "right": "issue:45070"}, {"accept": false, "left": "issue:43531", "reason": "A `sliding_window` issue for Qwen3-MoE is a model/config-specific bug, not the same as the pydantic `PretrainedConfig` breakage.", "right": "issue:45070"}, {"accept": false, "left": "issue:43541", "reason": "Grouped_mm tracing errors and missing `Float8_e4m3fnStorage` are distinct runtime failures with different symptoms and likely different fixes.", "right": "issue:44589"}], "summary": "The items are mostly unrelated Transformers/vLLM bug reports and feature requests. 
None of the proposed soft pairs look like the same underlying bug or change, so all soft edges should be rejected."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43541|issue:43828", "issue:36010|issue:39290", "issue:44841|issue:45084", "issue:41093|issue:41762", "issue:43296|issue:43366", "issue:43572|issue:45070", "issue:44387|issue:45005", "issue:43541|issue:45161", "issue:44291|issue:45070", "issue:43531|issue:45070", "issue:43541|issue:44589"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6952, "estimated_input_tokens": 3348, "item_count": 18, "node_count": 18, "serialized_chars": 13389, "soft_pair_count": 10}, "cached_at": "2026-04-14T12:31:43Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "b8208082022bf361ef8bbccb3d866d0bf1e8fb4a27b2fb8089851a96bcc8834a", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:39186", "issue:39290", "issue:40990", "issue:41093", "issue:42915", "issue:43054", "issue:43404", "issue:43541", "issue:43596", "issue:43645", "issue:43742", "issue:43994", "issue:44530", "issue:44863", "issue:44918", "issue:44960", "issue:45005", "issue:45399"], "result": {"analyst_result": {"best_issue_reason": "45005 is the least-bad representative for the cluster, but only weakly: the rest of the items span unrelated config, loading, tracing, cache, and inference bugs.", "best_pr_reason": null, "canonical_issue_reason": "Issue 45005 is the broadest umbrella-like item in the set because it explicitly targets a class of tied-weight regressions and is open, but it still does not actually cover most of the other model/runtime-specific reports.", 
"canonical_pr_reason": null, "confidence": 0.95, "soft_edge_verdicts": [{"accept": false, "left": "issue:39290", "reason": "Different failures and model paths: Gemma3TextConfig/vLLM attribute access vs MobileLLM checkpoint loading key lookup.", "right": "issue:43742"}, {"accept": false, "left": "issue:43054", "reason": "Both mention siglip2, but one is about degraded text embeddings and the other about bad AutoModel/pipeline outputs; not enough evidence of the same underlying bug.", "right": "issue:43994"}, {"accept": false, "left": "issue:41093", "reason": "Both are IndexErrors, but the concrete mismatch differs: mask/tensor shape mismatch vs zero3 init on BertModel.", "right": "issue:43596"}, {"accept": false, "left": "issue:42915", "reason": "Different models and code paths: Qwen3Moe with FineGrainedFP8Config vs Mixtral float32 grouped_mm failures during torch dynamo tracing.", "right": "issue:43541"}, {"accept": false, "left": "issue:40990", "reason": "No clear shared bug; one is a perplexity-quality report on gpt-oss-20b, the other is a GLM5 issue with no matching concrete symptom.", "right": "issue:44960"}, {"accept": false, "left": "issue:39186", "reason": "FSDP weight-dimensionality error is unrelated to Qwen3.5 embedding unpacking in TRL SFT trainer.", "right": "issue:44918"}, {"accept": false, "left": "issue:43404", "reason": "Both involve tied weights, but they concern different models and different failure modes; too broad to be the same concrete bug.", "right": "issue:45005"}, {"accept": false, "left": "issue:43645", "reason": "Custom-model Jupyter initialization breakage is unrelated to flash-attn2 fallback gating.", "right": "issue:45399"}, {"accept": false, "left": "issue:44530", "reason": "PagedAttentionCache/linear_attention crash on Qwen3.5 is a different subsystem and symptom from tied-weight translation model issues.", "right": "issue:45005"}, {"accept": false, "left": "issue:44863", "reason": "NemotronH checkpoint loading and tied-weights 
regressions are separate problems with no shared concrete code path.", "right": "issue:45005"}], "summary": "The cluster is heterogeneous: the soft-similarity pairs mostly share only broad wording (model names, index errors, weights, or fallback behavior) but not the same concrete bug or change. None of the candidate edges look like true duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:39290|issue:43742", "issue:43054|issue:43994", "issue:41093|issue:43596", "issue:42915|issue:43541", "issue:40990|issue:44960", "issue:39186|issue:44918", "issue:43404|issue:45005", "issue:43645|issue:45399", "issue:44530|issue:45005", "issue:44863|issue:45005"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6878, "estimated_input_tokens": 3311, "item_count": 18, "node_count": 18, "serialized_chars": 13243, "soft_pair_count": 10}, "cached_at": "2026-04-14T12:31:56Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "65fa5b46ab63c6c636abbeba830fefd63aebb5593c33f2dca9260a8f63805f9f", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:39692", "issue:41553", "issue:42915", "issue:43054", "issue:43644", "issue:43828", "issue:43873", "issue:43883", "issue:43950", "issue:43975", "issue:44292", "issue:44360", "issue:44492", "issue:44512", "issue:44534", "issue:44928", "issue:45005", "issue:45399"], "result": {"analyst_result": {"best_issue_reason": "43950 is the best representative issue because it states the regression precisely and has the strongest problem description among the duplicate candidates.", "best_pr_reason": null, "canonical_issue_reason": "Issue 43950 is the clearest, most 
specific report of the non-persistent-buffer corruption regression and aligns directly with the related v5 buffer-bug reports.", "canonical_pr_reason": null, "confidence": 0.88, "soft_edge_verdicts": [{"accept": false, "left": "issue:43873", "reason": "Quantization/offloading behavior vs tied-weights handling are different bugs; same broad area only.", "right": "issue:45005"}, {"accept": false, "left": "issue:44360", "reason": "DSA ReLU discussion is unrelated to a docs note about a removed run command.", "right": "issue:44512"}, {"accept": false, "left": "issue:42915", "reason": "FP8 model failure and flash-attn fallback gating are distinct code paths and symptoms.", "right": "issue:45399"}, {"accept": false, "left": "issue:43975", "reason": "Different models and failure modes; one is detokenization, the other a runtime error for a quantized Qwen model.", "right": "issue:44292"}, {"accept": false, "left": "issue:44492", "reason": "A typo in cache strategy docs is unrelated to the removed transformers run command docs issue.", "right": "issue:44512"}, {"accept": true, "left": "issue:43950", "reason": "Both report the same v5 regression where non-persistent buffers are mishandled/corrupted by from_pretrained.", "right": "issue:44534"}, {"accept": true, "left": "issue:43644", "reason": "Same underlying bug: transformers v5 fills non-persistent buffers with junk / corrupts them on load.", "right": "issue:43950"}, {"accept": false, "left": "issue:43828", "reason": "Both mention dtype/precision problems, but they involve different models and different mechanisms.", "right": "issue:44928"}, {"accept": false, "left": "issue:39692", "reason": "SigLIP2 docs/example errors are not the same as a text-embedding quality regression.", "right": "issue:43054"}, {"accept": false, "left": "issue:41553", "reason": "A bad Voxtral tokenizer error message and a Molmo AttributeError are unrelated loading issues.", "right": "issue:43883"}], "summary": "This cluster is mostly 
heterogeneous. The only clear duplicate set is the Transformers v5 non-persistent-buffer regression: issues 43644, 43950, and 44534 describe the same corruption bug. The rest are distinct model-, docs-, or error-message-specific reports."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43873|issue:45005", "issue:44360|issue:44512", "issue:42915|issue:45399", "issue:43975|issue:44292", "issue:44492|issue:44512", "issue:43950|issue:44534", "issue:43644|issue:43950", "issue:43828|issue:44928", "issue:39692|issue:43054", "issue:41553|issue:43883"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7066, "estimated_input_tokens": 3405, "item_count": 18, "node_count": 18, "serialized_chars": 13619, "soft_pair_count": 11}, "cached_at": "2026-04-14T12:32:24Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "9359bb9a501fbe1829e8029cd6b459e64d50175f2a4655df14061e9b6d094363", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:36331", "issue:38175", "issue:41553", "issue:41762", "issue:42947", "issue:43054", "issue:43493", "issue:43643", "issue:43856", "issue:43866", "issue:43872", "issue:43881", "issue:43883", "issue:44387", "issue:44403", "issue:44451", "issue:44589", "issue:45020"], "result": {"analyst_result": {"best_issue_reason": "43493 is the most actionable representative among the items, but only for the SigLIP2 subcluster; the rest of the issues are unrelated enough that none should be collapsed into it.", "best_pr_reason": null, "canonical_issue_reason": "No single canonical issue cleanly represents the whole set; if forced, issue 43493 is the best representative of the only 
coherent subtheme because it states the concrete HF-vs-JAX SigLIP2 discrepancy.", "canonical_pr_reason": null, "confidence": 0.84, "soft_edge_verdicts": [{"accept": false, "left": "issue:43866", "reason": "Different problems: a corrupted checkpoint versus generic loading noise. No shared underlying bug is evident.", "right": "issue:44403"}, {"accept": false, "left": "issue:36331", "reason": "One is a Trainer API kwarg regression, the other is a ZeRO-3 model-loading IndexError. Different code paths and fixes.", "right": "issue:41762"}, {"accept": false, "left": "issue:43872", "reason": "Both are low-level type/load failures, but the reported failures and likely causes differ: bitsandbytes constructor kwargs vs missing float8 storage support.", "right": "issue:44589"}, {"accept": false, "left": "issue:41553", "reason": "Different models and failure modes: bad AutoTokenizer messaging for Voxtral versus a GLM-4V loading failure.", "right": "issue:43881"}, {"accept": false, "left": "issue:38175", "reason": "Same model family, but the symptoms are distinct enough that a shared root cause is not established from the titles alone.", "right": "issue:43054"}, {"accept": false, "left": "issue:42947", "reason": "Gradient checkpointing/LoRA behavior and int4 CUDA reserved-memory OOM are different mechanisms, not the same bug.", "right": "issue:44387"}, {"accept": false, "left": "issue:43881", "reason": "GLM-4V loading failure and Molmo tied-weights attribute error are unrelated model-specific issues.", "right": "issue:43883"}, {"accept": true, "left": "issue:43054", "reason": "Both are about SigLIP2 behaving incorrectly relative to expected reference behavior; the JAX-vs-HF discrepancy is a plausible concrete root cause for the degraded text embeddings.", "right": "issue:43493"}, {"accept": false, "left": "issue:44403", "reason": "Generic loading noise is not the same as a specific inability to load ScandiBERT. 
Too broad to merge.", "right": "issue:44451"}, {"accept": false, "left": "issue:42947", "reason": "Both mention memory, but one is about LoRA checkpointing effectiveness and the other about Qwen3 MoE training memory usage; different underlying causes.", "right": "issue:43856"}, {"accept": false, "left": "issue:43643", "reason": "Issue 45020 is a broad umbrella about remote_code regressions, while 43643 is a specific AutoConfig missing-fields bug. Not safe to merge as the same defect.", "right": "issue:45020"}], "summary": "The set is mostly heterogeneous and does not form one duplicate cluster. The only plausible duplicate pair is the two SigLIP2 issues, which both point at an implementation discrepancy affecting text embeddings."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43866|issue:44403", "issue:36331|issue:41762", "issue:43872|issue:44589", "issue:41553|issue:43881", "issue:38175|issue:43054", "issue:42947|issue:44387", "issue:43881|issue:43883", "issue:43054|issue:43493", "issue:44403|issue:44451", "issue:42947|issue:43856", "issue:43643|issue:45020"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6694, "estimated_input_tokens": 3219, "item_count": 17, "node_count": 17, "serialized_chars": 12876, "soft_pair_count": 10}, "cached_at": "2026-04-14T12:32:46Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "fdcf29be42b88e9d06f879255d1595198c836eebc6064dd25b72957a599d52a1", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:41762", "issue:42491", "issue:42915", "issue:43054", "issue:43278", "issue:43782", "issue:43824", "issue:43872", "issue:43975", "issue:44368", 
"issue:44403", "issue:44451", "issue:44488", "issue:44661", "issue:44960", "issue:45020", "issue:45356"], "result": {"analyst_result": {"best_issue_reason": "Issue 45020 is the best global candidate by breadth and recency, but only as a loose umbrella for loading-regression reports; the rest of the cluster is too diverse for a single canonical issue.", "best_pr_reason": null, "canonical_issue_reason": "Issue 45020 is the broadest open report here, centered on recent transformer-version regressions breaking model loading with remote_code. It is the closest thing to an umbrella, though it does not truly subsume most of the other model-specific failures.", "canonical_pr_reason": null, "confidence": 0.84, "soft_edge_verdicts": [{"accept": false, "left": "issue:43782", "reason": "Different models and failure modes: Qwen3VL weight_only loading error vs GLM5 issue. Same general area, not the same bug.", "right": "issue:44960"}, {"accept": false, "left": "issue:44403", "reason": "One is a generic loading-noise complaint, the other is a concrete model load failure for a specific checkpoint. Not the same underlying problem.", "right": "issue:44488"}, {"accept": false, "left": "issue:42491", "reason": "Both mention Qwen3 and LoRA/tie_word_embeddings, but one is a version incompatibility for a trained model and the other is a warning during fine-tuning. Too different to merge.", "right": "issue:44368"}, {"accept": false, "left": "issue:41762", "reason": "Both are special-config failures, but they affect different architectures and code paths (Gemma3 ZeRO-3 vs Qwen3Moe FP8). 
Not mergeable as one bug.", "right": "issue:42915"}, {"accept": false, "left": "issue:43975", "reason": "DeepSeek detokenization regression and ScandiBERT loading failure are unrelated.", "right": "issue:44451"}, {"accept": false, "left": "issue:45020", "reason": "Both are recent regressions, but one is broad remote_code loading breakage and the other is a tokenizer codec/warning regression for Kimi-K2.5. Different code paths.", "right": "issue:45356"}, {"accept": false, "left": "issue:44661", "reason": "Both involve model-loading infrastructure, but one is a TOKENIZER_MAPPING_NAMES edge case and the other is remote_code breakage. Related subsystem, not same bug.", "right": "issue:45020"}, {"accept": false, "left": "issue:43824", "reason": "ImportError for a missing class and bitsandbytes Int8Params signature incompatibility are separate issues.", "right": "issue:43872"}, {"accept": false, "left": "issue:43975", "reason": "Different models and different symptoms: detokenization bug vs loading failure. No shared underlying defect.", "right": "issue:44488"}, {"accept": false, "left": "issue:43054", "reason": "Siglip2 embedding quality regression and a BF16-to-FP32 dtype change are not the same concrete issue; one may be a consequence, but the reports are distinct.", "right": "issue:43278"}], "summary": "This cluster is heterogeneous: most items are distinct model- or config-specific regressions (loading, tokenization, quantization, dtype, and warnings), not true duplicates. 
The soft-similarity pairs all look like false positives rather than one underlying bug."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43782|issue:44960", "issue:44403|issue:44488", "issue:42491|issue:44368", "issue:41762|issue:42915", "issue:43975|issue:44451", "issue:45020|issue:45356", "issue:44661|issue:45020", "issue:43824|issue:43872", "issue:43975|issue:44488", "issue:43054|issue:43278"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6882, "estimated_input_tokens": 3313, "item_count": 17, "node_count": 17, "serialized_chars": 13250, "soft_pair_count": 11}, "cached_at": "2026-04-14T12:33:06Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "6538183e0e3625cfd05ad444404c55da8bcd45f64103daad0ceeb9ea804cf28e", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:41553", "issue:42915", "issue:43257", "issue:43404", "issue:43742", "issue:43792", "issue:43827", "issue:43866", "issue:43872", "issue:43950", "issue:44220", "issue:44291", "issue:44509", "issue:44991", "issue:45020", "issue:45081", "issue:45399"], "result": {"analyst_result": {"best_issue_reason": "Issue 45020 is the most useful representative issue to keep at the top level: it is broad, still open, and captures the recurring 'recent versions break loading' regression theme better than the narrower model-specific reports.", "best_pr_reason": null, "canonical_issue_reason": "Issue 45020 is the best anchor for the cluster because it is the broadest open regression report ('recent transformers versions break models using remote_code'), but the overall set is heterogeneous rather than a single clean duplicate family.", 
"canonical_pr_reason": null, "confidence": 0.78, "soft_edge_verdicts": [{"accept": false, "left": "issue:44991", "reason": "Both are loading regressions, but one is a tokenizer failure for EMBEDDIA/est-roberta and the other is a flash-attn2 fallback issue; they involve different code paths and different symptoms.", "right": "issue:45399"}, {"accept": true, "left": "issue:43792", "reason": "Both point to the same Whisper/audio preprocessing path; the generic 'can't run' report is plausibly the same `_torch_extract_fbank_features()` failure.", "right": "issue:44220"}, {"accept": true, "left": "issue:43827", "reason": "These are the same docs regression: v5 removed `pipeline()` support for summarization/translation-style task examples, and both reports describe stale documentation references.", "right": "issue:44509"}, {"accept": false, "left": "issue:45081", "reason": "`_patch_mistral_regex` tokenizer crash and flash-attn2 fallback blocking are unrelated issues in different parts of the loading stack.", "right": "issue:45399"}, {"accept": false, "left": "issue:43404", "reason": "One is a Mistral3 lm_head tying bug, the other is a `from_pretrained()` non-persistent buffer regression; they are different model-loading defects.", "right": "issue:43950"}, {"accept": false, "left": "issue:43257", "reason": "Both involve model/checkpoint loading, but they concern different models and different failure modes (Qwen3 MoE conversion under accelerate+deepspeed vs. 
corrupted Ovis2 1B checkpoint).", "right": "issue:43866"}, {"accept": false, "left": "issue:42915", "reason": "`Qwen3Moe` with `FineGrainedFP8Config` is a specific quantization/configuration bug, while 45020 is a broad remote_code compatibility report; not the same concrete change.", "right": "issue:45020"}, {"accept": false, "left": "issue:41553", "reason": "Both are tokenizer-loading complaints, but they target different models and different failure mechanisms; the titles do not indicate the same underlying bug.", "right": "issue:44991"}, {"accept": false, "left": "issue:43742", "reason": "A KeyError on `facebook/MobileLLM-125M` may be a regression, but the report is too model-specific to merge with the broad remote_code compatibility issue.", "right": "issue:45020"}, {"accept": false, "left": "issue:43742", "reason": "These describe different failures: a model-loading KeyError versus an attention-backend fallback block.", "right": "issue:45399"}, {"accept": true, "left": "issue:43872", "reason": "Both reports describe the same `_is_hf_initialized` argument regression during model loading / init-empty-weights flows, with the bitsandbytes error being the concrete manifestation of the same incompatibility.", "right": "issue:44291"}], "summary": "This cluster is mostly a set of unrelated transformer regressions and docs issues. 
The only strong duplicate pairs are the v5 docs pipeline-removal reports, the Whisper/audio feature-extraction failure, and the `_is_hf_initialized` loading regression; the rest are model-specific or subsystem-specific but not the same underlying bug."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44991|issue:45399", "issue:43792|issue:44220", "issue:43827|issue:44509", "issue:45081|issue:45399", "issue:43404|issue:43950", "issue:43257|issue:43866", "issue:42915|issue:45020", "issue:41553|issue:44991", "issue:43742|issue:45020", "issue:43742|issue:45399", "issue:43872|issue:44291"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6710, "estimated_input_tokens": 3227, "item_count": 17, "node_count": 17, "serialized_chars": 12906, "soft_pair_count": 10}, "cached_at": "2026-04-14T12:33:28Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "0edb41504fe2b39df288a089eb17331a7b99510dab5aa0f622e56f719bc8f204", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:36331", "issue:41093", "issue:41553", "issue:42915", "issue:43257", "issue:43854", "issue:43872", "issue:43940", "issue:44479", "issue:44488", "issue:44530", "issue:44560", "issue:44805", "issue:44863", "issue:45084", "issue:45305", "issue:45313"], "result": {"analyst_result": {"best_issue_reason": "No issue stands out as a global best representative because the cluster is not a single bug family; choosing any one would over-merge unrelated reports.", "best_pr_reason": null, "canonical_issue_reason": "No single issue is a safe canonical for this set: the items span distinct failures (training API, model loading, quantization, 
video regressions, and shape mismatches) rather than one duplicate thread.", "canonical_pr_reason": null, "confidence": 0.9, "soft_edge_verdicts": [{"accept": false, "left": "issue:43940", "reason": "Both are DeepSpeed ZeRO-3 load failures, but they target different Qwen variants and describe different symptoms; too broad to treat as the same concrete bug.", "right": "issue:45313"}, {"accept": false, "left": "issue:41553", "reason": "Both are model-loading problems, but one is a bad AutoTokenizer error for Voxtral and the other is a failure to load a specific model; no clear shared code-path bug.", "right": "issue:44488"}, {"accept": false, "left": "issue:42915", "reason": "Different incompatibilities: FineGrainedFP8Config on Qwen3Moe vs. a bitsandbytes Int8Params constructor kwarg error. Similar dependency area, not the same bug.", "right": "issue:43872"}, {"accept": false, "left": "issue:43854", "reason": "Both mention load failures, but they concern different model implementations/checkpoints and no evidence indicates a shared root cause.", "right": "issue:44863"}, {"accept": false, "left": "issue:36331", "reason": "Unrelated errors: a Trainer compute_loss signature mismatch versus a tensor mask shape IndexError.", "right": "issue:41093"}, {"accept": false, "left": "issue:43257", "reason": "Both involve DeepSpeed and Qwen3-family training, but one is about MOE weight conversion during load while the other is about gradient averaging with loss kwargs; different concrete bugs.", "right": "issue:45305"}, {"accept": false, "left": "issue:41093", "reason": "Both are shape-mismatch IndexErrors, but the reported tensors and contexts differ and there is no evidence they come from the same defect.", "right": "issue:44805"}, {"accept": false, "left": "issue:44479", "reason": "Both concern video handling in Qwen/VL models, but one is a broad v5.3.0 regression and the other is a StopIteration in a specific embedding path; not enough to merge.", "right": "issue:44560"}, 
{"accept": false, "left": "issue:44530", "reason": "Different failures in different model families: PagedAttentionCache/linear_attention on Qwen3.5 versus NemotronH checkpoint loading.", "right": "issue:44863"}, {"accept": false, "left": "issue:43872", "reason": "A bitsandbytes constructor incompatibility and a template-node compilation error are unrelated failures despite both being TypeErrors.", "right": "issue:45084"}], "summary": "This cluster is mostly a heterogeneous mix of unrelated issue reports; the proposed soft pairs look superficially similar but lack evidence of the same underlying bug or change. I would not collapse any of them together."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43940|issue:45313", "issue:41553|issue:44488", "issue:42915|issue:43872", "issue:43854|issue:44863", "issue:36331|issue:41093", "issue:43257|issue:45305", "issue:41093|issue:44805", "issue:44479|issue:44560", "issue:44530|issue:44863", "issue:43872|issue:45084"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7094, "estimated_input_tokens": 3419, "item_count": 18, "node_count": 18, "serialized_chars": 13675, "soft_pair_count": 11}, "cached_at": "2026-04-14T12:33:55Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "7819029bd59a892bd6b6b00a333d7b037aa80683de97ad228d5c5f7f5f70a423", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:24643", "issue:30064", "issue:42617", "issue:42915", "issue:43366", "issue:43531", "issue:43550", "issue:43645", "issue:43646", "issue:43824", "issue:43828", "issue:43950", "issue:43957", "issue:44560", "issue:44589", "issue:44910", "issue:44918", 
"issue:45325"], "result": {"analyst_result": {"best_issue_reason": "43646 is the strongest representative issue overall only in the limited sense that it captures the shared custom-model initialization regression; the rest of the cluster is unrelated.", "best_pr_reason": null, "canonical_issue_reason": "Issue 43646 is the best canonical among the items because it is the broader, cleaner report of the only likely duplicate pair; 43645 appears to be the same regression with an added Jupyter-notebook context.", "canonical_pr_reason": null, "confidence": 0.84, "soft_edge_verdicts": [{"accept": false, "left": "issue:43366", "reason": "Different bugs: GGUF support for gpt-oss vs a Phi-tiny-MoE autocast dtype mismatch.", "right": "issue:43828"}, {"accept": false, "left": "issue:24643", "reason": "Unrelated failures: DeepSpeed training weight-shape error vs image processor handling of void segmentation maps.", "right": "issue:30064"}, {"accept": false, "left": "issue:43366", "reason": "Both involve Qwen/gpt-oss model coverage, but the bugs are different: GGUF architecture support vs sliding_window behavior in Qwen3-MoE.", "right": "issue:43531"}, {"accept": false, "left": "issue:44560", "reason": "Different code paths: Qwen3-vl-embedding video StopIteration vs TRL SFT unpacking of Qwen3.5 embeddings.", "right": "issue:44918"}, {"accept": false, "left": "issue:43366", "reason": "No shared underlying bug; one is missing GGUF architecture support, the other is a Float8 storage deserialization issue.", "right": "issue:44589"}, {"accept": false, "left": "issue:43950", "reason": "Distinct regressions: non-persistent buffer loading corruption vs Qwen2.5-VL rope-index scaling for still images.", "right": "issue:45325"}, {"accept": false, "left": "issue:43550", "reason": "Attention-related, but not the same bug: Bamba-9B-v2 torch.compile+SDPA failure vs Qwen3.5 Flash Attention illegal memory access from misread 3D position_ids.", "right": "issue:44910"}, {"accept": true, "left": 
"issue:43645", "reason": "Same underlying regression: Transformers 5.0.0 breaks custom model initialization; 43645 is the notebook-specific wording of the same issue, while 43646 is the broader report.", "right": "issue:43646"}, {"accept": false, "left": "issue:43824", "reason": "Different problems: missing Qwen2_5_VL import vs meta-device loading failures across some models.", "right": "issue:43957"}, {"accept": false, "left": "issue:42915", "reason": "Different failures: Qwen3Moe with FineGrainedFP8Config vs model loading on torch.device(\"meta\").", "right": "issue:43957"}, {"accept": false, "left": "issue:42617", "reason": "Unrelated issues: inability to run 3d_parallel.py vs GGUF support for gpt-oss.", "right": "issue:43366"}], "summary": "The cluster is mostly heterogeneous and does not form one coherent duplicate set. The only clear duplicate-like pair is the Transformers 5.0.0 custom model initialization regression (43645/43646); the rest are distinct model-specific or subsystem-specific bugs. 
No PRs are present."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43366|issue:43828", "issue:24643|issue:30064", "issue:43366|issue:43531", "issue:44560|issue:44918", "issue:43366|issue:44589", "issue:43950|issue:45325", "issue:43550|issue:44910", "issue:43645|issue:43646", "issue:43824|issue:43957", "issue:42915|issue:43957", "issue:42617|issue:43366"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6980, "estimated_input_tokens": 3362, "item_count": 18, "node_count": 18, "serialized_chars": 13447, "soft_pair_count": 10}, "cached_at": "2026-04-14T12:34:13Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "adf8d4e1c971bee8159de00045a533241aaad121c0f85b4d63b097c34e13912b", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:36010", "issue:38175", "issue:39692", "issue:42915", "issue:43317", "issue:43596", "issue:43638", "issue:43644", "issue:43646", "issue:43950", "issue:43994", "issue:44534", "issue:44560", "issue:44661", "issue:44756", "issue:44805", "issue:44877", "issue:45030"], "result": {"analyst_result": {"best_issue_reason": "issue:43644 is the best issue to keep as the representative duplicate target for the buffer-corruption bug; it is concrete, versioned, and matches the duplicate report closely.", "best_pr_reason": null, "canonical_issue_reason": "issue:43644 is the clearest canonical report for the non-persistent-buffer regression: it names the v5.0.0 breakage directly and is the more specific statement of the same bug.", "canonical_pr_reason": null, "confidence": 0.91, "soft_edge_verdicts": [{"accept": false, "left": "issue:38175", "reason": "Both concern SigLIP2 
output quality, but one is about zero probabilities and the other about nonsensical results in AutoModel/pipeline usage; too symptom-level and usage-specific to treat as the same bug.", "right": "issue:43994"}, {"accept": false, "left": "issue:43646", "reason": "Both mention Transformers v5 regressions, but one is custom model initialization and the other is from_pretrained non-persistent buffer corruption; different failure modes/code paths.", "right": "issue:43950"}, {"accept": false, "left": "issue:44560", "reason": "Different models and different runtime errors (StopIteration in video embedding vs mask/tensor shape mismatch); no evidence they share the same underlying defect.", "right": "issue:44805"}, {"accept": false, "left": "issue:44877", "reason": "Both are config-validation loading failures, but for different model families/configs and likely different schema issues; too broad to merge.", "right": "issue:45030"}, {"accept": false, "left": "issue:36010", "reason": "An import error for GenerationMixin is unrelated to a Qwen3Moe FineGrainedFP8Config failure; no shared code path.", "right": "issue:42915"}, {"accept": false, "left": "issue:43638", "reason": "DeepSpeed ZeRO-3/BertModel indexing failure is unrelated to add-new-model-like failing inside TOKENIZER_MAPPING_NAMES; different subsystems and symptoms.", "right": "issue:44661"}, {"accept": false, "left": "issue:43596", "reason": "BertModel with ZeRO-3 index-out-of-bounds and tokenizer-mapping logic for model registration are unrelated bugs.", "right": "issue:44661"}, {"accept": false, "left": "issue:43317", "reason": "device_map=auto dequantized-model offload failure and Strix Halo mmap OOM are both loading/performance problems, but not the same concrete bug.", "right": "issue:44756"}, {"accept": true, "left": "issue:43644", "reason": "These are essentially the same report: Transformers v5 fills non-persistent buffers with junk / corrupts register_buffer(persistent=False) state during loading.", 
"right": "issue:44534"}, {"accept": false, "left": "issue:38175", "reason": "Both involve SigLIP2, but one is model output corruption and the other is a documentation example with model/processor mismatch plus quantization failure; not the same underlying issue.", "right": "issue:39692"}], "summary": "The set is mostly heterogeneous; only the two non-persistent-buffer regression reports look like the same underlying bug. The rest share broad themes (SigLIP2, config loading, initialization, offload) but differ in concrete symptom and code path."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:38175|issue:43994", "issue:43646|issue:43950", "issue:44560|issue:44805", "issue:44877|issue:45030", "issue:36010|issue:42915", "issue:43638|issue:44661", "issue:43596|issue:44661", "issue:43317|issue:44756", "issue:43644|issue:44534", "issue:38175|issue:39692"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6436, "estimated_input_tokens": 3090, "item_count": 17, "node_count": 17, "serialized_chars": 12359, "soft_pair_count": 9}, "cached_at": "2026-04-14T12:34:27Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "444c771bcc5a0060210bdb7ed54c652beec5eaec917a7852bb8e4b1e1fa07918", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:39692", "issue:41720", "issue:42915", "issue:43708", "issue:43856", "issue:43873", "issue:43994", "issue:44075", "issue:44451", "issue:44488", "issue:44960", "issue:44977", "issue:44991", "issue:45313", "issue:45357", "issue:45362", "issue:45406"], "result": {"analyst_result": {"best_issue_reason": "No single issue cleanly represents the cluster because the items 
cover distinct problems; if forced, the most self-contained reports are model-specific loading/serving bugs, but none span the whole set.", "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "issue:45313", "reason": "Same model family, but one is a ZeRO-3 load failure and the other is a save_pretrained key regression; different code paths and symptoms.", "right": "issue:45357"}, {"accept": false, "left": "issue:45362", "reason": "Different models and different failure sites: Qwen3.5 chat crash versus Gemma4Processor missing _tokenizer in serve.", "right": "issue:45406"}, {"accept": false, "left": "issue:44960", "reason": "Both involve model inference, but the reports are about different models and different mechanisms (GLM5 vs Qwen3.5 flash-attention generation).", "right": "issue:44977"}, {"accept": false, "left": "issue:44451", "reason": "Both mention model loading failures, but they target different checkpoints and do not clearly describe the same underlying regression.", "right": "issue:44488"}, {"accept": false, "left": "issue:43708", "reason": "Trainer checkpoint resumption/max_steps logic is unrelated to SGD optimizer argument handling.", "right": "issue:44075"}, {"accept": false, "left": "issue:39692", "reason": "Both mention SigLIP2, but one is a docs example mismatch/quantization issue while the other is nonsensical outputs with AutoModel/pipeline; not clearly the same bug.", "right": "issue:43994"}, {"accept": false, "left": "issue:44488", "reason": "Both are tokenizer/model-loading complaints, but they concern different models and no shared concrete failure is evident.", "right": "issue:44991"}, {"accept": false, "left": "issue:43856", "reason": "One is about Qwen3 MoE training memory usage, the other about quantization offloading behavior; distinct problems.", "right": "issue:43873"}, {"accept": false, "left": "issue:41720", "reason": "Different Qwen3 
issues: cudaErrorAssert with auto device mapping versus failure with FineGrainedFP8Config.", "right": "issue:42915"}], "summary": "These issues are a heterogeneous set spanning unrelated models, training, serving, and tokenizer/loading regressions. None of the soft pairs look like the same concrete bug or change, so there is no single canonical issue for the cluster."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:45313|issue:45357", "issue:45362|issue:45406", "issue:44960|issue:44977", "issue:44451|issue:44488", "issue:43708|issue:44075", "issue:39692|issue:43994", "issue:44488|issue:44991", "issue:43856|issue:43873", "issue:41720|issue:42915"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6964, "estimated_input_tokens": 3354, "item_count": 17, "node_count": 17, "serialized_chars": 13415, "soft_pair_count": 12}, "cached_at": "2026-04-14T12:34:51Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "933b66c0ac34d2e071d71aa4e0624010d3719481f8aa2c0266f668ed0d1a89c6", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:42491", "issue:42915", "issue:43257", "issue:43404", "issue:43454", "issue:43644", "issue:43645", "issue:43720", "issue:43854", "issue:43856", "issue:43931", "issue:44387", "issue:44661", "issue:44683", "issue:44756", "issue:45276", "issue:45335"], "result": {"analyst_result": {"best_issue_reason": "45276 is the best issue to keep as the canonical report: it is open, specific enough to identify the bug, and broad enough to cover the related embedding-resize symptom family.", "best_pr_reason": null, "canonical_issue_reason": "Issue 45276 is the broadest and clearest 
representative of the shared resize_token_embeddings bug, and it can naturally absorb the narrower t5gemma variant.", "canonical_pr_reason": null, "confidence": 0.77, "soft_edge_verdicts": [{"accept": false, "left": "issue:43854", "reason": "Different model families and different loading failures; both are about model loading, but not the same underlying bug.", "right": "issue:43931"}, {"accept": false, "left": "issue:44661", "reason": "Unrelated problems: tokenizer-mapping logic for add-new-model-like vs flex_attention compilation on torch 2.9.", "right": "issue:44683"}, {"accept": false, "left": "issue:42491", "reason": "Both involve accelerate loading, but one is a Qwen3 MoE LoRA/version mismatch and the other is BitNet packed-weight unpacking; separate code paths.", "right": "issue:43720"}, {"accept": false, "left": "issue:42915", "reason": "Qwen3Moe FP8 loading/training failure and Strix Halo mmap OOM are different bugs with different causes.", "right": "issue:44756"}, {"accept": false, "left": "issue:42491", "reason": "LoRA/Qwen3 MoE compatibility issue is unrelated to resize_token_embeddings behavior.", "right": "issue:45276"}, {"accept": false, "left": "issue:43257", "reason": "Qwen3 MoE weight conversion with accelerate+deepspeed and GLM-4.7-Flash loading are distinct model-loading bugs.", "right": "issue:43854"}, {"accept": false, "left": "issue:43856", "reason": "Both mention memory, but one is Qwen3 MoE training inefficiency and the other is an int4 CUDA reserved-memory regression.", "right": "issue:44387"}, {"accept": false, "left": "issue:43644", "reason": "Both are Transformers 5.0.0 regressions, but they describe different failures: non-persistent buffers junk vs notebook custom-model initialization.", "right": "issue:43645"}, {"accept": false, "left": "issue:42491", "reason": "Different models and different symptoms; not the same underlying issue.", "right": "issue:43931"}, {"accept": false, "left": "issue:42491", "reason": "Different Qwen3 
MoE-related problems: one is model compatibility/loading, the other is training memory usage.", "right": "issue:43856"}, {"accept": true, "left": "issue:45276", "reason": "Both report the same resize_token_embeddings bug where resized embeddings are not propagated to model-specific embedding/output layers in closely related Gemma-family variants.", "right": "issue:45335"}, {"accept": false, "left": "issue:43404", "reason": "Both involve untied lm_head weights, but they concern different model families and likely separate implementations, so they are not safely the same bug.", "right": "issue:43454"}], "summary": "Most pairs are unrelated issue reports that only share a broad subsystem. The only strong duplicate-like pair is the Gemma resize_token_embeddings reports, which describe the same embedding-resize propagation bug across closely related model variants."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43854|issue:43931", "issue:44661|issue:44683", "issue:42491|issue:43720", "issue:42915|issue:44756", "issue:42491|issue:45276", "issue:43257|issue:43854", "issue:43856|issue:44387", "issue:43644|issue:43645", "issue:42491|issue:43931", "issue:42491|issue:43856", "issue:45276|issue:45335", "issue:43404|issue:43454"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6474, "estimated_input_tokens": 3109, "item_count": 17, "node_count": 17, "serialized_chars": 12435, "soft_pair_count": 9}, "cached_at": "2026-04-14T12:35:06Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "2e3da52bb8828eac39ae3c2bf8addc18d856a290e9d3a56ebf20d4eb1d7f6ef6", "prepared_review_unit_schema_version": "1.0"}, 
"nodes": ["issue:36010", "issue:39404", "issue:39692", "issue:41720", "issue:42947", "issue:43381", "issue:43425", "issue:43873", "issue:43957", "issue:44155", "issue:44164", "issue:44291", "issue:44387", "issue:44402", "issue:44488", "issue:44756", "issue:44960"], "result": {"analyst_result": {"best_issue_reason": "Issue 44387 is the strongest standalone issue because it has a clear failure mode and an actionable, narrowly defined problem statement.", "best_pr_reason": null, "canonical_issue_reason": "Issue 44387 is the most concrete, regression-like report in the set: it describes a specific int4 quantization OOM symptom with enough context to stand as the best representative issue.", "canonical_pr_reason": null, "confidence": 0.87, "soft_edge_verdicts": [{"accept": false, "left": "issue:36010", "reason": "Different bugs: one is an import path failure for GenerationMixin, the other is Whisper pipeline return_language behavior.", "right": "issue:39404"}, {"accept": false, "left": "issue:44164", "reason": "Both involve model loading, but they are distinct failures: extra_state handling vs _is_hf_initialized argument mismatch.", "right": "issue:44291"}, {"accept": false, "left": "issue:44402", "reason": "Related only at a broad tokenizer/model-loading level; one is a specific vocab mismatch, the other is a model loading failure for a particular checkpoint.", "right": "issue:44488"}, {"accept": false, "left": "issue:41720", "reason": "Completely different code paths and symptoms: CUDA assert during auto device mapping vs batched inference embedding/token leakage.", "right": "issue:44155"}, {"accept": false, "left": "issue:43873", "reason": "Both concern quantization/memory, but the concrete problems differ: offloading behavior vs increased reserved memory leading to OOM.", "right": "issue:44387"}, {"accept": false, "left": "issue:39692", "reason": "Unrelated: documentation/example errors for SigLIP2 vs a separate GLM5 issue.", "right": "issue:44960"}, {"accept": 
false, "left": "issue:44387", "reason": "Both mention OOM, but the causes are different: int4 quantization memory growth vs disabling mmap on Strix Halo.", "right": "issue:44756"}, {"accept": false, "left": "issue:43425", "reason": "Distinct incompatibilities: Torch 2.10 compatibility vs loading with torch.device(\"meta\") under Transformers 5.x.", "right": "issue:43957"}, {"accept": false, "left": "issue:42947", "reason": "Both involve gradient checkpointing, but one is ineffective with PEFT LoRA while the other is specifically about eval mode.", "right": "issue:43381"}], "summary": "These items form a loose Transformers-related theme, but the soft pairs do not look like true duplicates of the same underlying bug. Most are separate failure modes in loading, quantization, checkpointing, or model-specific behavior."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:36010|issue:39404", "issue:44164|issue:44291", "issue:44402|issue:44488", "issue:41720|issue:44155", "issue:43873|issue:44387", "issue:39692|issue:44960", "issue:44387|issue:44756", "issue:43425|issue:43957", "issue:42947|issue:43381"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6682, "estimated_input_tokens": 3213, "item_count": 17, "node_count": 17, "serialized_chars": 12849, "soft_pair_count": 10}, "cached_at": "2026-04-14T12:35:48Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "399d320fd043c25fb49388fc7d642365f1a99b180fea85de38399474b1212c20", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:43381", "issue:43646", "issue:43708", "issue:43854", "issue:43957", "issue:44164", "issue:44291", "issue:44292", 
"issue:44743", "issue:44756", "issue:44912", "issue:44928", "issue:44960", "issue:45216", "issue:45276", "issue:45292", "issue:45335"], "result": {"analyst_result": {"best_issue_reason": "issue:45292 best represents the cluster\u2019s shared embedding-resize problem while remaining generic enough to subsume the closest variant reports.", "best_pr_reason": null, "canonical_issue_reason": "issue:45292 is the cleanest, most general report of the resize_token_embeddings/output_embeddings bug; issue:45276 looks like a model-specific variant with extra affected fields, so 45292 is the best umbrella issue.", "canonical_pr_reason": null, "confidence": 0.71, "soft_edge_verdicts": [{"accept": false, "left": "issue:44292", "reason": "Both are quantized model-loading problems, but they target different models and symptoms (NVFP4 load error vs MXFP4 fallback to bf16), so they do not look like the same bug.", "right": "issue:44912"}, {"accept": false, "left": "issue:43854", "reason": "Different model families and different failure surfaces: one is a unit-test load issue for GLM-4.7-Flash, the other is a save_pretrained regression for Qwen3.5.", "right": "issue:45216"}, {"accept": false, "left": "issue:44291", "reason": "These are unrelated failures: unexpected _is_hf_initialized with init_empty_weights versus mmap/OOM behavior on Strix Halo.", "right": "issue:44756"}, {"accept": true, "left": "issue:45276", "reason": "Both report resize_token_embeddings not updating downstream embeddings; 45276 is the model-specific Gemma4 variant with extra affected fields, while 45292 is the generic core symptom.", "right": "issue:45292"}, {"accept": false, "left": "issue:44743", "reason": "The titles do not describe the same concrete bug; one is a recurrent-state/cache reset issue, the other is a generic GLM5 loading/model report.", "right": "issue:44960"}, {"accept": false, "left": "issue:43381", "reason": "These are fundamentally different: gradient checkpointing eval-mode behavior versus 
gradient explosion from 3D position_ids forcing SDPA math fallback.", "right": "issue:44928"}, {"accept": false, "left": "issue:43957", "reason": "Meta-device loading breakage and disabling mmap to avoid OOM are different code paths and different failure modes.", "right": "issue:44756"}, {"accept": false, "left": "issue:44164", "reason": "save/from_pretrained extra_state handling is unrelated to mmap/OOM on Strix Halo.", "right": "issue:44756"}, {"accept": false, "left": "issue:43646", "reason": "Custom model initialization breakage in Transformers 5.0 is not the same bug as resume_from_checkpoint calculating max_steps incorrectly.", "right": "issue:43708"}, {"accept": false, "left": "issue:45292", "reason": "Both involve resize_token_embeddings, but one is about output_embeddings and the other about decoder.embed_tokens; that is similar API area, not clearly the same concrete bug.", "right": "issue:45335"}], "summary": "Mostly false-positive similarity pairs; the only strong duplicate-looking pair is the resize_token_embeddings reports around output_embeddings. 
The rest differ by model, API, or concrete failure mode."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44292|issue:44912", "issue:43854|issue:45216", "issue:44291|issue:44756", "issue:45276|issue:45292", "issue:44743|issue:44960", "issue:43381|issue:44928", "issue:43957|issue:44756", "issue:44164|issue:44756", "issue:43646|issue:43708", "issue:45292|issue:45335"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6944, "estimated_input_tokens": 3344, "item_count": 18, "node_count": 18, "serialized_chars": 13373, "soft_pair_count": 10}, "cached_at": "2026-04-14T12:36:19Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "8b6fc53e7236b6e0cf2c525009701c133e479ba5bb5a3002651ce3cd7ccd67b7", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:43257", "issue:43425", "issue:43645", "issue:43716", "issue:43828", "issue:43906", "issue:43957", "issue:44164", "issue:44265", "issue:44402", "issue:44451", "issue:44479", "issue:44530", "issue:44589", "issue:44756", "issue:44863", "issue:44877", "issue:44898"], "result": {"analyst_result": {"best_issue_reason": "issue:44479 is the best representative issue in this mixed cluster because it is the most specific and actionable bug report, even though it is not a duplicate of the others.", "best_pr_reason": null, "canonical_issue_reason": "issue:44479 is the strongest standalone report here: it describes a concrete regression, names multiple affected Qwen variants, and has clearer scope than the other unrelated issues.", "canonical_pr_reason": null, "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "issue:44479", "reason": 
"Different bugs: one is a video-input regression for Qwen VL/MoE models, the other is strict config validation blocking granite_speech loading.", "right": "issue:44877"}, {"accept": false, "left": "issue:44402", "reason": "Both involve model/tokenizer loading, but one is a specific vocab_size mismatch for rmihaylov/bert-base-bg and the other is a generic load failure for vesteinn/ScandiBERT.", "right": "issue:44451"}, {"accept": false, "left": "issue:43645", "reason": "Custom-model initialization in Jupyter notebooks is a different problem from save/from_pretrained failing to preserve extra_state.", "right": "issue:44164"}, {"accept": false, "left": "issue:44265", "reason": "torch.export with torch_compilable_check and Perceiver non-default-resolution interpolation failures are different code paths and failure modes.", "right": "issue:44898"}, {"accept": false, "left": "issue:43425", "reason": "Torch 2.10 incompatibility is a broad dependency issue, not the same as the autocast dtype mismatch on Phi-tiny-MoE-instruct.", "right": "issue:43828"}, {"accept": false, "left": "issue:43257", "reason": "Qwen3 MoE weight conversion with accelerate/deepspeed and NemotronH checkpoint loading affect different model families and different loading logic.", "right": "issue:44863"}, {"accept": false, "left": "issue:43828", "reason": "Autocast dtype mismatch is not the same bug as meta-device loading breaking some models; they have different triggers and subsystems.", "right": "issue:43957"}, {"accept": false, "left": "issue:44589", "reason": "Float8 storage deserialization and disabling mmap on Strix Halo to avoid OOM are unrelated runtime issues.", "right": "issue:44756"}, {"accept": false, "left": "issue:43906", "reason": "An isolated reproduction of a prior issue and a PagedAttentionCache crash on Qwen3.5 do not point to the same concrete underlying bug.", "right": "issue:44530"}, {"accept": false, "left": "issue:43425", "reason": "Torch version incompatibility and Mistral-3 
image preprocessor dtype mismatch are unrelated problems.", "right": "issue:43716"}], "summary": "This cluster is not a duplicate set: the issues span unrelated model-loading, dtype, config, export, and runtime problems. All soft pairs look like false positives with different failure modes or different affected code paths."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44479|issue:44877", "issue:44402|issue:44451", "issue:43645|issue:44164", "issue:44265|issue:44898", "issue:43425|issue:43828", "issue:43257|issue:44863", "issue:43828|issue:43957", "issue:44589|issue:44756", "issue:43906|issue:44530", "issue:43425|issue:43716"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6956, "estimated_input_tokens": 3350, "item_count": 18, "node_count": 18, "serialized_chars": 13398, "soft_pair_count": 10}, "cached_at": "2026-04-14T12:36:34Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "e6a9ab97e6a9435d36c98a539d6d2ceb01cabdaa60950fd84feeb5306884683e", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:39404", "issue:43299", "issue:43425", "issue:43596", "issue:43611", "issue:43645", "issue:43901", "issue:43931", "issue:44479", "issue:44509", "issue:44530", "issue:44589", "issue:44683", "issue:44805", "issue:44863", "issue:44912", "issue:44918", "issue:45084"], "result": {"analyst_result": {"best_issue_reason": "issue:43611 is the best single issue to anchor this cluster since it captures the widest, most central failure mode (v5 loading regressions) without being overly model-specific.", "best_pr_reason": null, "canonical_issue_reason": "issue:43611 is the broadest representative 
of the cluster because it is a core Transformers 5.0 model-loading regression, which is the closest shared theme across several issues.", "canonical_pr_reason": null, "confidence": 0.84, "soft_edge_verdicts": [{"accept": false, "left": "issue:39404", "reason": "Different problems: Whisper pipeline `return_language` behavior vs MXFP4 quantization fallback for git-oss-20b; no shared code path.", "right": "issue:44912"}, {"accept": false, "left": "issue:43931", "reason": "Both are model-loading failures, but for different model families and symptoms: Qwen3-VL shape mismatch vs NemotronH checkpoint loading. Not the same bug.", "right": "issue:44863"}, {"accept": false, "left": "issue:43611", "reason": "Both are Transformers 5.0 loading regressions, but one is `base_model_prefix` handling and the other is custom models in Jupyter notebooks. Too different to merge.", "right": "issue:43645"}, {"accept": false, "left": "issue:44530", "reason": "Different failure modes: `PagedAttentionCache`/`linear_attention` on Qwen3.5 vs compilation error for non-template nodes. No concrete overlap.", "right": "issue:45084"}, {"accept": false, "left": "issue:43901", "reason": "Both are docs issues, but they concern different pipeline/task deprecations and different text. Same area, not the same change.", "right": "issue:44509"}, {"accept": false, "left": "issue:43299", "reason": "Both mention Qwen3-VL loading, but one is a dev version breakage for MoE models while the other is a specific weight-shape mismatch on a different checkpoint. Not identical.", "right": "issue:43931"}, {"accept": false, "left": "issue:44479", "reason": "A video-input regression for several Qwen variants is not the same as an attention-mask shape mismatch. 
Related ecosystem, different bug.", "right": "issue:44805"}, {"accept": false, "left": "issue:44589", "reason": "Unrelated errors: missing Float8 storage object vs non-template-node compilation failure.", "right": "issue:45084"}, {"accept": false, "left": "issue:43596", "reason": "Different subsystems and symptoms: DeepSpeed ZeRO-3/BertModel index error vs compiled flex_attention on torch >= 2.9.", "right": "issue:44683"}, {"accept": false, "left": "issue:43425", "reason": "Torch compatibility complaint vs Qwen3.5/TRL SFT input embedding unpacking failure; no shared underlying change.", "right": "issue:44918"}], "summary": "These items are mostly unrelated regressions and doc bugs; the only loose commonality is that several involve Transformers v5 model loading or pipeline behavior, but none of the soft pairs look like the same concrete underlying bug/change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:39404|issue:44912", "issue:43931|issue:44863", "issue:43611|issue:43645", "issue:44530|issue:45084", "issue:43901|issue:44509", "issue:43299|issue:43931", "issue:44479|issue:44805", "issue:44589|issue:45084", "issue:43596|issue:44683", "issue:43425|issue:44918"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7140, "estimated_input_tokens": 3442, "item_count": 18, "node_count": 18, "serialized_chars": 13768, "soft_pair_count": 11}, "cached_at": "2026-04-14T12:36:48Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "55ddbd35dba0903d5d526a7d158c3f91819ea0443cd01851f7750bafc68df59d", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:32090", "issue:36032", "issue:39404", 
"issue:40990", "issue:42947", "issue:43452", "issue:43582", "issue:43632", "issue:43856", "issue:43957", "issue:43994", "issue:44079", "issue:44164", "issue:44488", "issue:44671", "issue:44756", "issue:44991", "issue:45081"], "result": {"analyst_result": {"best_issue_reason": "issue:43957 best captures the cluster\u2019s most general, reusable failure mode; the others are narrower and model-specific.", "best_pr_reason": null, "canonical_issue_reason": "issue:43957 is the broadest and most representative report here: a transformers v5 model-loading regression affecting multiple models, which makes it the best single issue to anchor the set.", "canonical_pr_reason": null, "confidence": 0.95, "soft_edge_verdicts": [{"accept": false, "left": "issue:43856", "reason": "Both mention memory/OOM, but one is Qwen3 MoE training inefficiency and the other is Strix Halo mmap-related OOM. Different trigger, platform, and fix path.", "right": "issue:44756"}, {"accept": false, "left": "issue:43994", "reason": "43994 is a model/pipeline inference correctness issue for SigLIP2, while 44079 is a generic ModelOutput key-assignment bug. Related area, but not the same bug.", "right": "issue:44079"}, {"accept": false, "left": "issue:40990", "reason": "One is high perplexity on gpt-oss-20b evals; the other is gradient checkpointing not working with PEFT LoRA. 
Different symptoms and code paths.", "right": "issue:42947"}, {"accept": false, "left": "issue:44671", "reason": "Both involve v5 and model/tokenizer behavior, but CamemBERT masked-LM predictions and EMBEDDIA/est-roberta tokenizer loading are distinct failures.", "right": "issue:44991"}, {"accept": false, "left": "issue:36032", "reason": "T5Tokenizer special-token method conflict and gguf_file loading failures are different tokenizer/model loading problems with different root causes.", "right": "issue:43452"}, {"accept": false, "left": "issue:39404", "reason": "Whisper pipeline return_language regression is unrelated to CamemBERT masked-LM prediction errors.", "right": "issue:44671"}, {"accept": false, "left": "issue:44991", "reason": "Different tokenizer failures: one is a transformers>=5 loading regression for EMBEDDIA/est-roberta, the other is a Mistral regex patch crash on missing backend_tokenizer.", "right": "issue:45081"}, {"accept": false, "left": "issue:43582", "reason": "Apple Silicon allocator TypeError and _is_hf_initialized breakage are separate runtime regressions with different affected subsystems.", "right": "issue:43632"}, {"accept": false, "left": "issue:44488", "reason": "Both are tokenizer-loading failures, but for different models and different error paths (sleng-bert vs Mistral regex patch).", "right": "issue:45081"}, {"accept": false, "left": "issue:43957", "reason": "meta-device loading regressions and extra_state save/from_pretrained handling are distinct serialization/loading issues.", "right": "issue:44164"}, {"accept": false, "left": "issue:32090", "reason": "Trainer gpu_broadcast NoneType error and Whisper return_language pipeline regression are unrelated.", "right": "issue:39404"}], "summary": "These issues are mostly unrelated one-off regressions or model-specific bugs. 
They share only broad themes like loading failures or inference regressions, but none of the soft pairs look like the same underlying bug/change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43856|issue:44756", "issue:43994|issue:44079", "issue:40990|issue:42947", "issue:44671|issue:44991", "issue:36032|issue:43452", "issue:39404|issue:44671", "issue:44991|issue:45081", "issue:43582|issue:43632", "issue:44488|issue:45081", "issue:43957|issue:44164", "issue:32090|issue:39404"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6456, "estimated_input_tokens": 3100, "item_count": 17, "node_count": 17, "serialized_chars": 12399, "soft_pair_count": 9}, "cached_at": "2026-04-14T12:37:02Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "2bf0a2516a1b73efa4991cc5be4c6ee3fdff14b671e762893614920a88752f9d", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:40990", "issue:43421", "issue:43425", "issue:43611", "issue:43644", "issue:43716", "issue:43883", "issue:44164", "issue:44206", "issue:44220", "issue:44265", "issue:44479", "issue:44488", "issue:44610", "issue:44987", "issue:45084", "issue:45161"], "result": {"analyst_result": {"best_issue_reason": "Issue 43611 is the best representative only in a loose sense: it is a clear, general Transformers 5.0 loading regression and sits near the most recurrent theme in this set. It is still not a good global canonical because the other issues describe different subsystems and failure modes.", "best_pr_reason": null, "canonical_issue_reason": "No strong canonical duplicate exists. 
If one must be chosen, issue 43611 is the broadest and most central to the apparent Transformers 5.0 model-loading regression subset, but the overall cluster is too heterogeneous to justify a single true canonical issue.", "canonical_pr_reason": null, "confidence": 0.88, "soft_edge_verdicts": [{"accept": false, "left": "issue:40990", "reason": "Different bugs: perplexity on GPT-OSS/WikiText-2 versus runtime special-token post-processing not updating in TokenizersBackend. Same library area, but not the same underlying defect.", "right": "issue:43421"}, {"accept": false, "left": "issue:43425", "reason": "One is a Torch 2.10 compatibility problem; the other is a TP-only failure in GPT-OSS MoE. These are different compatibility/code-path issues.", "right": "issue:45161"}, {"accept": false, "left": "issue:43883", "reason": "Both are model-loading failures, but one is a missing attribute on MolmoForCausalLM and the other is a specific cjvt/sleng-bert load regression. No evidence they share the same root cause.", "right": "issue:44488"}, {"accept": false, "left": "issue:44164", "reason": "save/from_pretrained extra_state handling is a serialization issue; the Qwen video-input regression is a multimodal preprocessing/input-shape problem. Different code paths.", "right": "issue:44479"}, {"accept": false, "left": "issue:44265", "reason": "torch.export failures with torch_compilable_check are export/compiler-related, while OmDet-Turbo\u2019s 640px vs 224px mismatch is a processor/model size regression.", "right": "issue:44610"}, {"accept": false, "left": "issue:44488", "reason": "Both involve model loading, but they target different models and likely different failure causes. 
Too broad to merge as one bug.", "right": "issue:44987"}, {"accept": false, "left": "issue:43716", "reason": "Image preprocessor dtype mismatch for Mistral-3 is unrelated to the 'Can't compile non template nodes' compilation error.", "right": "issue:45084"}, {"accept": false, "left": "issue:44206", "reason": "Both are audio-feature related, but one is a LasrFeatureExtractor regression passing an unsupported arg, while the other is a separate _torch_extract_fbank_features() issue. Not enough evidence of one root cause.", "right": "issue:44220"}, {"accept": false, "left": "issue:43611", "reason": "Both are Transformers 5.0 regressions, but one breaks loading via base_model_prefix and the other corrupts non-persistent buffers. Different state-handling bugs, so do not merge.", "right": "issue:43644"}], "summary": "This cluster is not a true duplicate set: most items are unrelated regressions across loading, tokenization, audio/image preprocessing, export, and model-specific behavior. The only partial thematic overlap is around Transformers 5.x regressions, but the concrete failure modes differ."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:40990|issue:43421", "issue:43425|issue:45161", "issue:43883|issue:44488", "issue:44164|issue:44479", "issue:44265|issue:44610", "issue:44488|issue:44987", "issue:43716|issue:45084", "issue:44206|issue:44220", "issue:43611|issue:43644"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6486, "estimated_input_tokens": 3115, "item_count": 17, "node_count": 17, "serialized_chars": 12458, "soft_pair_count": 9}, "cached_at": "2026-04-14T12:37:16Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": 
"gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "1ebbb29ec022e30fa1790e430d2a9e92d567685130f265ecee6aa48a2fdc17bf", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:34567", "issue:40444", "issue:42947", "issue:43317", "issue:43452", "issue:43482", "issue:43541", "issue:43643", "issue:43644", "issue:43646", "issue:43883", "issue:44451", "issue:44530", "issue:44756", "issue:44987", "issue:44991", "issue:45084"], "result": {"analyst_result": {"best_issue_reason": "Issue 43452 is the strongest representative because it describes the core loading-path failure in a model-agnostic way, making it the best duplicate target among the provided issues.", "best_pr_reason": null, "canonical_issue_reason": "Issue 43452 is the broader, more generic report of the GGUF/from_pretrained loading breakage, so it is the best canonical issue if these two are grouped together.", "canonical_pr_reason": null, "confidence": 0.62, "soft_edge_verdicts": [{"accept": false, "left": "issue:42947", "reason": "Different bugs: gradient checkpointing/LoRA behavior versus mmap-induced OOM on Strix Halo.", "right": "issue:44756"}, {"accept": false, "left": "issue:43883", "reason": "Unrelated failures: missing all_tied_weights_keys on a model class versus tokenizer loading failure for a specific model.", "right": "issue:44991"}, {"accept": false, "left": "issue:34567", "reason": "Completely different areas: TrainerState token accounting versus multimodal IterableDataset loading.", "right": "issue:40444"}, {"accept": true, "left": "issue:43452", "reason": "Both describe the same GGUF loading regression in the Transformers v5/from_pretrained path; 43482 is a model-specific manifestation of 43452.", "right": "issue:43482"}, {"accept": false, "left": "issue:43541", "reason": "Different subsystems and symptoms: torch dynamo grouped_mm tracing error versus template-node compilation failure.", "right": "issue:45084"}, {"accept": false, "left": "issue:44451", "reason": "Both are 
loading failures, but they concern different models and likely different code paths; too broad to treat as the same bug.", "right": "issue:44987"}, {"accept": false, "left": "issue:43643", "reason": "Distinct issues: trust_remote_code config field loss versus PagedAttentionCache linear_attention crash.", "right": "issue:44530"}, {"accept": false, "left": "issue:43644", "reason": "Related only at a very high level (Transformers 5 breakage), but the concrete failures differ: junk non-persistent buffers versus broken custom initialization.", "right": "issue:43646"}, {"accept": false, "left": "issue:43317", "reason": "Different failure modes: device_map auto dequantized model offload versus GGUF loading with Transformers v5.", "right": "issue:43482"}], "summary": "Mostly unrelated issues; only the GGUF loading reports look like the same underlying regression. No PRs are present."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:42947|issue:44756", "issue:43883|issue:44991", "issue:34567|issue:40444", "issue:43452|issue:43482", "issue:43541|issue:45084", "issue:44451|issue:44987", "issue:43643|issue:44530", "issue:43644|issue:43646", "issue:43317|issue:43482"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7080, "estimated_input_tokens": 3412, "item_count": 18, "node_count": 18, "serialized_chars": 13647, "soft_pair_count": 10}, "cached_at": "2026-04-14T12:37:33Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "13d292893545837a19a7e19d4d083111a571c849303cd2d7d23cdf85a97d783f", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:36032", "issue:39404", "issue:42915", "issue:43262", 
"issue:43377", "issue:43404", "issue:43645", "issue:43646", "issue:43825", "issue:44403", "issue:44466", "issue:44530", "issue:44534", "issue:44849", "issue:44987", "issue:44991", "issue:45081", "issue:45325"], "result": {"analyst_result": {"best_issue_reason": "Issue 43645 is the best single issue to anchor on: it is explicit, user-visible, and more general than the narrower custom-model follow-up (43646).", "best_pr_reason": null, "canonical_issue_reason": "Issue 43645 is the strongest representative of the set because it describes a broad, concrete Transformers 5 regression affecting custom-model initialization in notebooks, with clear user impact and a well-scoped failure mode.", "canonical_pr_reason": null, "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "issue:36032", "reason": "Unrelated failures: a T5 tokenizer loading conflict versus a Qwen3Moe FP8 config issue. No shared code path.", "right": "issue:42915"}, {"accept": false, "left": "issue:43262", "reason": "Audio chat-template sampling-rate handling and MIMI batch padding-mask behavior are different bugs in different components.", "right": "issue:43377"}, {"accept": false, "left": "issue:44849", "reason": "Different models and mechanisms: Qwen3.5 hidden-states output bug versus Qwen2.5-VL rope/position-id scaling.", "right": "issue:45325"}, {"accept": false, "left": "issue:39404", "reason": "Whisper pipeline return_language regression is not the same as a v5 pipeline error-message wording problem about translation tasks.", "right": "issue:43825"}, {"accept": false, "left": "issue:42915", "reason": "FP8 configuration failure and PagedAttention linear_attention crash are distinct model/runtime problems.", "right": "issue:44530"}, {"accept": false, "left": "issue:44987", "reason": "Both are loading failures, but for different models and likely different loader paths; not the same concrete bug.", "right": "issue:44991"}, {"accept": false, "left": "issue:36032", "reason": "Tokenizer 
add_special_tokens conflict is unrelated to custom model definition/initialization in notebooks.", "right": "issue:43645"}, {"accept": false, "left": "issue:43404", "reason": "Both mention lm_head/tied weights, but one is a model-specific missing tie and the other is device-dependent serialization behavior.", "right": "issue:44466"}, {"accept": false, "left": "issue:43646", "reason": "Custom model initialization breakage is unrelated to non-persistent buffers being filled with junk.", "right": "issue:44534"}, {"accept": false, "left": "issue:44403", "reason": "Noise on model loading and a Mistral regex-patch tokenizer crash are different issues with different fixes.", "right": "issue:45081"}], "summary": "None of the soft-edge pairs look like the same underlying bug or change; they mostly share broad subsystems or keyword overlap only."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:36032|issue:42915", "issue:43262|issue:43377", "issue:44849|issue:45325", "issue:39404|issue:43825", "issue:42915|issue:44530", "issue:44987|issue:44991", "issue:36032|issue:43645", "issue:43404|issue:44466", "issue:43646|issue:44534", "issue:44403|issue:45081"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6778, "estimated_input_tokens": 3261, "item_count": 18, "node_count": 18, "serialized_chars": 13044, "soft_pair_count": 9}, "cached_at": "2026-04-14T12:37:53Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "fd59786c53d62cc2c248c0648d303be66eec63c78b0bc3a657b8600dd6290c75", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:41553", "issue:42915", "issue:43322", "issue:43377", "issue:43540", 
"issue:43582", "issue:43645", "issue:43819", "issue:43994", "issue:44220", "issue:44451", "issue:44534", "issue:44610", "issue:44683", "issue:45081", "issue:45084", "issue:45216", "issue:45357"], "result": {"analyst_result": {"best_issue_reason": "issue:45216 is the strongest representative of the only true duplicate pair in the set, since it captures the root checkpoint-serialization regression and can subsume the later follow-up.", "best_pr_reason": null, "canonical_issue_reason": "issue:45216 is the best canonical candidate because it is the earlier, broader report of the Qwen3.5 `save_pretrained` regression; issue:45357 reads like a narrower follow-up about the same incorrect visual encoder keys.", "canonical_pr_reason": null, "confidence": 0.95, "soft_edge_verdicts": [{"accept": false, "left": "issue:43994", "reason": "Both are vision-model issues, but they concern different models and different failures: SigLIP2 loading vs OmDet-Turbo image size mismatch.", "right": "issue:44610"}, {"accept": false, "left": "issue:44451", "reason": "Both involve tokenizer/loading behavior, but ScandiBERT load failure and Mistral regex patch crash are different code paths and bugs.", "right": "issue:45081"}, {"accept": false, "left": "issue:43322", "reason": "Both are multimodal model reports, but one is a Llava Next segfault and the other is a Qwen3OmniMoe video-input ValueError; not the same bug.", "right": "issue:43540"}, {"accept": true, "left": "issue:45216", "reason": "Same underlying Qwen3.5 `save_pretrained` regression: both report incorrect checkpoint/visual encoder keys, with the latter appearing to be a narrower follow-up.", "right": "issue:45357"}, {"accept": false, "left": "issue:43645", "reason": "Both mention Transformers v5 regressions, but one is Jupyter custom-model initialization and the other is non-persistent buffers being filled incorrectly.", "right": "issue:44534"}, {"accept": false, "left": "issue:44220", "reason": "Different failure modes in different 
subsystems: `_torch_extract_fbank_features()` vs compiled `flex_attention` on torch >= 2.9.", "right": "issue:44683"}, {"accept": false, "left": "issue:43377", "reason": "Different models and defects: MIMI padding-mask batching mismatch versus DAC `from_latents`/forward-pass mismatch.", "right": "issue:43819"}, {"accept": false, "left": "issue:41553", "reason": "Unrelated issues: Voxtral AutoTokenizer error messaging vs Apple Silicon `caching_allocator_warmup` TypeError.", "right": "issue:43582"}, {"accept": false, "left": "issue:42915", "reason": "Different bugs in different areas: Qwen3Moe FineGrainedFP8Config failure versus a template-node compile TypeError.", "right": "issue:45084"}], "summary": "These items are mostly unrelated bug reports across different models, processors, and runtime paths. The only clear duplicate-like pair is the two Qwen3.5 `save_pretrained` regression reports; the rest should not be merged."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43994|issue:44610", "issue:44451|issue:45081", "issue:43322|issue:43540", "issue:45216|issue:45357", "issue:43645|issue:44534", "issue:44220|issue:44683", "issue:43377|issue:43819", "issue:41553|issue:43582", "issue:42915|issue:45084"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6604, "estimated_input_tokens": 3174, "item_count": 17, "node_count": 17, "serialized_chars": 12694, "soft_pair_count": 9}, "cached_at": "2026-04-14T12:38:07Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "4f529640b3502bb899c7e3dd0a4c87d71de66f118e426e9de9730511d4426d5b", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:40990", 
"issue:43296", "issue:43317", "issue:43404", "issue:43531", "issue:43632", "issue:43856", "issue:44062", "issue:44265", "issue:44479", "issue:44756", "issue:44811", "issue:44987", "issue:45072", "issue:45081", "issue:45127", "issue:45325"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.98, "soft_edge_verdicts": [{"accept": false, "left": "issue:43632", "reason": "Different problems: one is an `_is_hf_initialized` flag regression in Transformers v5, the other is mmap-related OOM on Strix Halo.", "right": "issue:44756"}, {"accept": false, "left": "issue:44987", "reason": "Both involve loading/tokenizers, but the concrete failures differ: loading `physical-intelligence/fast` vs a Mistral tokenizer regex patch crash.", "right": "issue:45081"}, {"accept": false, "left": "issue:43296", "reason": "Unrelated model-specific issues: PaddleOCR-VL load failure vs Qwen3-MoE sliding_window behavior.", "right": "issue:43531"}, {"accept": false, "left": "issue:43404", "reason": "Both mention tied weights, but they are different code paths and symptoms: Mistral3 lm_head not tied vs LoRA merge collapse with extended vocab.", "right": "issue:45127"}, {"accept": false, "left": "issue:44062", "reason": "Tokenizer `AddedToken(special=...)` TypeError is unrelated to the `physical-intelligence/fast` loading failure.", "right": "issue:44987"}, {"accept": false, "left": "issue:44479", "reason": "Same broad Qwen VL family, but one is a video-input regression and the other is a still-image rope index scaling bug.", "right": "issue:45325"}, {"accept": false, "left": "issue:40990", "reason": "Completely different subsystems: perplexity on gpt-oss-20b vs Whisper `batch_decode()` skipping special tokens.", "right": "issue:44811"}, {"accept": false, "left": "issue:44265", "reason": "Different failures: `torch.export` with `torch_compilable_check` vs dtype mismatches in bf16 inference.", 
"right": "issue:45072"}, {"accept": false, "left": "issue:43317", "reason": "Different performance/runtime issues: dequantized model offload loading failure vs Qwen3 MoE training memory inefficiency.", "right": "issue:43856"}], "summary": "These items do not form a duplicate cluster; they span unrelated bug reports across model loading, tokenizer behavior, MoE memory, video/image processing, export, and runtime flags. All soft edge pairs should be rejected."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43632|issue:44756", "issue:44987|issue:45081", "issue:43296|issue:43531", "issue:43404|issue:45127", "issue:44062|issue:44987", "issue:44479|issue:45325", "issue:40990|issue:44811", "issue:44265|issue:45072", "issue:43317|issue:43856"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6998, "estimated_input_tokens": 3371, "item_count": 18, "node_count": 18, "serialized_chars": 13481, "soft_pair_count": 10}, "cached_at": "2026-04-14T12:38:20Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "715719627a9d41c1225a8581c3f2492707e7400426a51fdf3785027c97e50f7f", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:36032", "issue:40444", "issue:42915", "issue:43421", "issue:43531", "issue:43611", "issue:43632", "issue:43742", "issue:43818", "issue:44291", "issue:44534", "issue:44568", "issue:44589", "issue:44683", "issue:44877", "issue:45020", "issue:45325", "issue:45357"], "result": {"analyst_result": {"best_issue_reason": "issue:45020 is the broadest open umbrella for recent-version breakage with `remote_code`, but it still only represents one slice of these reports and should not 
absorb the others as duplicates.", "best_pr_reason": null, "canonical_issue_reason": "No single canonical issue fits the whole cluster; the reported failures are heterogeneous and only loosely related by framework/version.", "canonical_pr_reason": null, "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "issue:42915", "reason": "Both are loading failures in v5-era code, but one is Qwen3Moe/FineGrainedFP8Config and the other is an `_is_hf_initialized` init_empty_weights TypeError; different code paths and fixes.", "right": "issue:44291"}, {"accept": false, "left": "issue:43421", "reason": "Both involve special-token behavior, but one is runtime post-processor sync in TokenizersBackend while the other is a specific mdeberta tokenizer not adding BOS/EOS with `add_special_tokens=True`.", "right": "issue:44568"}, {"accept": false, "left": "issue:43611", "reason": "Both are Transformers v5 regressions, but `base_model_prefix` loading and non-persistent buffer initialization are unrelated bugs.", "right": "issue:44534"}, {"accept": false, "left": "issue:42915", "reason": "Different failures: FP8 config/model loading versus a missing Float8 storage object; no shared concrete bug or fix.", "right": "issue:44589"}, {"accept": false, "left": "issue:40444", "reason": "Both are Qwen2.5-VL/image-text issues, but one is multi-image IterableDataset finetuning failure and the other is still-image temporal position scaling in `get_rope_index`.", "right": "issue:45325"}, {"accept": false, "left": "issue:43632", "reason": "One concerns the `_is_hf_initialized` flag during loading, the other junk-filled non-persistent buffers; distinct regressions.", "right": "issue:44534"}, {"accept": false, "left": "issue:36032", "reason": "T5 tokenizer special-token method conflict and MobileLLM-125M key error are different loading/tokenizer problems with no common code path.", "right": "issue:43742"}, {"accept": false, "left": "issue:43818", "reason": "Completely different models 
and bugs: Video-LLaVA temporal attention/image-tower sharing versus Qwen3.5 visual encoder key saving.", "right": "issue:45357"}, {"accept": false, "left": "issue:44683", "reason": "flex_attention on torch>=2.9 is a backend/runtime compatibility issue, while `remote_code` breakage is a model-loading compatibility issue.", "right": "issue:45020"}, {"accept": false, "left": "issue:43531", "reason": "Qwen3-MoE sliding-window behavior and granite_speech strict-config loading are unrelated model-specific regressions.", "right": "issue:44877"}], "summary": "This cluster is not a duplicate set: the items share a broad Transformers-v5 / model-loading theme, but each describes a different concrete bug path (tokenizers, config loading, buffer init, FP8/storage, video/image models, etc.)."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:42915|issue:44291", "issue:43421|issue:44568", "issue:43611|issue:44534", "issue:42915|issue:44589", "issue:40444|issue:45325", "issue:43632|issue:44534", "issue:36032|issue:43742", "issue:43818|issue:45357", "issue:44683|issue:45020", "issue:43531|issue:44877"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6850, "estimated_input_tokens": 3297, "item_count": 18, "node_count": 18, "serialized_chars": 13187, "soft_pair_count": 10}, "cached_at": "2026-04-14T12:38:34Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "70a3506bc6579a51878b2b2801fc7c27697cea8147fe78432c9024d8aac8d431", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:33666", "issue:35707", "issue:36032", "issue:40444", "issue:41720", "issue:43425", "issue:43482", "issue:43525", "issue:43792", 
"issue:44079", "issue:44186", "issue:44265", "issue:44403", "issue:44534", "issue:44589", "issue:44821", "issue:44991", "issue:45084"], "result": {"analyst_result": {"best_issue_reason": "Issue 40444 is the most concrete, reproducible report in the set (specific model, dataset shape, and failure condition), so it is the best standalone issue anchor\u2014but it is not a duplicate of the others.", "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.95, "soft_edge_verdicts": [{"accept": false, "left": "issue:44821", "reason": "Different subsystems: loading AutoImageProcessor from a URL vs torch.compile/export template-node compilation failure.", "right": "issue:45084"}, {"accept": false, "left": "issue:43425", "reason": "Torch 2.10 compatibility error is unrelated to generic transformer loading noise.", "right": "issue:44403"}, {"accept": false, "left": "issue:36032", "reason": "Tokenizer method-name conflict in T5 is unrelated to Qwen3 auto device mapping CUDA asserts.", "right": "issue:41720"}, {"accept": false, "left": "issue:43792", "reason": "Whisper runtime failure and Float8 storage lookup error are different concrete failure paths.", "right": "issue:44589"}, {"accept": false, "left": "issue:44186", "reason": "LayoutLMv2 tokenizer padding/truncation crash is unrelated to torch.export failing on torch_compilable_check.", "right": "issue:44265"}, {"accept": false, "left": "issue:44403", "reason": "One is about noisy loading logs; the other is an actual tokenizer load failure for a specific checkpoint.", "right": "issue:44991"}, {"accept": false, "left": "issue:33666", "reason": "Both involve Qwen VL, but the reported failures differ: multi-GPU training vs iterable dataset with multiple images per prompt.", "right": "issue:40444"}, {"accept": false, "left": "issue:35707", "reason": "Progressive generation with inputs_embeds/past_key_values is a different generation-path bug than multimodal finetuning failures.", 
"right": "issue:40444"}, {"accept": false, "left": "issue:44079", "reason": "ModelOutput key assignment semantics are unrelated to non-persistent buffer initialization in v5.", "right": "issue:44534"}, {"accept": false, "left": "issue:43482", "reason": "GGUF loading in transformers v5 and missing pad_token_id on Llama4Config are separate compatibility issues.", "right": "issue:43525"}], "summary": "All candidates are distinct issue reports with different models, code paths, or failure modes; none of the soft pairs look like true duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44821|issue:45084", "issue:43425|issue:44403", "issue:36032|issue:41720", "issue:43792|issue:44589", "issue:44186|issue:44265", "issue:44403|issue:44991", "issue:33666|issue:40444", "issue:35707|issue:40444", "issue:44079|issue:44534", "issue:43482|issue:43525"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7134, "estimated_input_tokens": 3439, "item_count": 18, "node_count": 18, "serialized_chars": 13755, "soft_pair_count": 11}, "cached_at": "2026-04-14T12:38:49Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "70ebcd0c596e72166ed38fe928283530115140181331e604e5716a9173c20f5f", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:40444", "issue:41720", "issue:42175", "issue:43296", "issue:43531", "issue:43606", "issue:43644", "issue:43749", "issue:43761", "issue:43819", "issue:43881", "issue:44079", "issue:44265", "issue:44291", "issue:44488", "issue:44589", "issue:45081", "issue:45381"], "result": {"analyst_result": {"best_issue_reason": "issue:44079 is the best representative issue because 
it describes a general framework-level regression rather than a narrow model- or environment-specific failure.", "best_pr_reason": null, "canonical_issue_reason": "issue:44079 is the most broadly applicable and central-seeming bug here: a generic `ModelOutput` key assignment regression with wider potential impact than the model-specific reports.", "canonical_pr_reason": null, "confidence": 0.41, "soft_edge_verdicts": [{"accept": false, "left": "issue:44291", "reason": "Both are `TypeError`-style load failures, but one is about an unexpected `_is_hf_initialized` argument and the other about a missing `Float8_e4m3fnStorage`; different code paths and failure modes.", "right": "issue:44589"}, {"accept": false, "left": "issue:43606", "reason": "CPU offload device mismatch during model execution is unrelated to `torch.export.export` failing on `torch_compilable_check`.", "right": "issue:44265"}, {"accept": false, "left": "issue:43606", "reason": "One is a device-mismatch/offload CI failure; the other is a DAC latent/forward mismatch. 
Same broad area only, not the same bug.", "right": "issue:43819"}, {"accept": false, "left": "issue:43749", "reason": "`FSDP_CPU_RAM_EFFICIENT_LOADING` and `ModelOutput` key assignment are different framework features with different symptoms.", "right": "issue:44079"}, {"accept": false, "left": "issue:43761", "reason": "CLIP hidden-state omission in v5 is model-forward behavior; `ModelOutput` key bookkeeping is a separate output-container regression.", "right": "issue:44079"}, {"accept": false, "left": "issue:43644", "reason": "Non-persistent buffer junk initialization is not the same as incorrect `ModelOutput` key assignment.", "right": "issue:44079"}, {"accept": false, "left": "issue:42175", "reason": "One concerns TensorFlow not being installed with a torch extra; the other is a specific model load failure in vLLM/transformers.", "right": "issue:43296"}, {"accept": false, "left": "issue:43881", "reason": "Both are model loading issues, but the titles point to different models and likely different tokenizer/config problems.", "right": "issue:44488"}, {"accept": false, "left": "issue:40444", "reason": "Both involve Qwen2.5-VL multimodal inputs, but one is finetuning with multiple images and the other is video `vision_position_ids`; similar subsystem, not clearly the same bug.", "right": "issue:45381"}, {"accept": false, "left": "issue:41720", "reason": "Qwen3 auto-device-map CUDA assert is unrelated to a Mistral tokenizer regex patch crash.", "right": "issue:45081"}, {"accept": false, "left": "issue:43531", "reason": "A `sliding_window` issue in Qwen3-MoE is a model behavior bug, not the same as the `init_empty_weights` unexpected-argument load error.", "right": "issue:44291"}], "summary": "This cluster is mostly a set of unrelated closed issues that only share superficial language around model loading, device/offload, or multimodal Qwen titles. I found no strong duplicate pairs among the soft candidates, so all soft edges are rejected. 
No pull requests are present in the cluster."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44291|issue:44589", "issue:43606|issue:44265", "issue:43606|issue:43819", "issue:43749|issue:44079", "issue:43761|issue:44079", "issue:43644|issue:44079", "issue:42175|issue:43296", "issue:43881|issue:44488", "issue:40444|issue:45381", "issue:41720|issue:45081", "issue:43531|issue:44291"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6524, "estimated_input_tokens": 3134, "item_count": 17, "node_count": 17, "serialized_chars": 12536, "soft_pair_count": 9}, "cached_at": "2026-04-14T12:39:03Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "7566b639ce7359e36c54503fb59117a53bc6f8e1d812be6dc2607f67ea7165fe", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:42915", "issue:43454", "issue:43482", "issue:43611", "issue:43632", "issue:43646", "issue:43828", "issue:43994", "issue:44387", "issue:44466", "issue:44610", "issue:44617", "issue:44683", "issue:44898", "issue:45072", "issue:45325", "issue:45381"], "result": {"analyst_result": {"best_issue_reason": "`issue:43611` is the most suitable representative issue: it is general, clearly v5-related, and closer to the common regression theme than the narrower model-specific reports.", "best_pr_reason": null, "canonical_issue_reason": "`issue:43611` is the best central anchor because it describes the broadest v5 regression in model loading, which is the closest shared theme across the cluster.", "canonical_pr_reason": null, "confidence": 0.72, "soft_edge_verdicts": [{"accept": false, "left": "issue:45325", "reason": "Both are Qwen2.5-VL 
position-id regressions, but one is still-image temporal scaling and the other is video vision position ids; different code paths and symptoms.", "right": "issue:45381"}, {"accept": false, "left": "issue:44610", "reason": "Unrelated bugs: OmDet-Turbo input-size mismatch vs Qwen2.5-VL rope/position-id scaling.", "right": "issue:45325"}, {"accept": false, "left": "issue:43828", "reason": "Both mention dtype or torch-version failures, but one is an autocast mismatch in Phi MoE and the other is compiled flex_attention on torch >= 2.9; not the same bug.", "right": "issue:44683"}, {"accept": false, "left": "issue:44387", "reason": "Both are OOM reports, but they involve different models and likely different causes; memory symptoms alone are too broad to merge.", "right": "issue:44617"}, {"accept": false, "left": "issue:43611", "reason": "Both are v5 model-init/load regressions, but one is about `base_model_prefix` loading and the other custom model initialization; related area, not the same underlying bug.", "right": "issue:43646"}, {"accept": false, "left": "issue:43632", "reason": "`_is_hf_initialized` breakage and bf16 dtype mismatches are separate failures with different mechanisms.", "right": "issue:45072"}, {"accept": false, "left": "issue:43994", "reason": "Both are vision-model inference issues, but they affect different models and different failure modes (`AutoModel`/pipeline nonsense vs non-default-resolution interpolation failure).", "right": "issue:44898"}, {"accept": false, "left": "issue:42915", "reason": "Qwen3Moe FP8 config failure and Qwen2.5-GGUF loading failure are unrelated model/version problems.", "right": "issue:43482"}, {"accept": false, "left": "issue:43454", "reason": "Both touch `lm_head`/tied weights, but one is missing weight tying in AyaVision while the other is device-dependent serialization inconsistency; not one concrete bug.", "right": "issue:44466"}], "summary": "This cluster is mostly a mixed bag of Transformers v5 regression reports 
across different models and code paths, not a true duplicate set. The closest anchor is the broad model-loading regression around `base_model_prefix`, but the other items are mostly separate bugs."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:45325|issue:45381", "issue:44610|issue:45325", "issue:43828|issue:44683", "issue:44387|issue:44617", "issue:43611|issue:43646", "issue:43632|issue:45072", "issue:43994|issue:44898", "issue:42915|issue:43482", "issue:43454|issue:44466"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6872, "estimated_input_tokens": 3308, "item_count": 18, "node_count": 18, "serialized_chars": 13230, "soft_pair_count": 9}, "cached_at": "2026-04-14T12:39:15Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "b39a064a2c28e4a31a12639f21f9588f63691d409738e0c299289cbfde940987", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:34634", "issue:35707", "issue:43262", "issue:43317", "issue:43526", "issue:43746", "issue:43792", "issue:43873", "issue:43881", "issue:43883", "issue:43957", "issue:44186", "issue:44291", "issue:44877", "issue:44898", "issue:44991", "issue:45072", "issue:45325"], "result": {"analyst_result": {"best_issue_reason": "issue:43873 has the clearest cross-cutting bug framing and active discussion, making it the best single issue to represent this cluster.", "best_pr_reason": null, "canonical_issue_reason": "issue:43873 is the broadest and most reusable representative: it describes an open offloading/quantization failure with enough generality to cover a wider class of reports than the model-specific tickets.", "canonical_pr_reason": null, 
"confidence": 0.93, "soft_edge_verdicts": [{"accept": false, "left": "issue:43792", "reason": "Both involve model loading failures, but one is Whisper-specific and the other is a meta-device loading regression affecting different models/code paths.", "right": "issue:43957"}, {"accept": false, "left": "issue:43526", "reason": "These are unrelated: one is a BeitImageProcessorFast label-reduction bug, the other is a Molmo tied-weights attribute error.", "right": "issue:43883"}, {"accept": false, "left": "issue:44186", "reason": "Different subsystems and failure modes: tokenizer crashes on NER/padding versus dtype mismatches in bfloat16 inference.", "right": "issue:45072"}, {"accept": false, "left": "issue:43881", "reason": "Both are loading-related, but one is a multimodal model load failure and the other is a tokenizer load regression for a different model family.", "right": "issue:44991"}, {"accept": false, "left": "issue:43317", "reason": "Both mention offloading/quantization, but the titles point to different scenarios: dequantized model load with device_map=auto versus generic offloading not working as expected.", "right": "issue:43873"}, {"accept": false, "left": "issue:43262", "reason": "Different bug classes: audio chat-template sample-rate defaults versus GraniteSpeech PEFT checkpoint loading.", "right": "issue:43746"}, {"accept": false, "left": "issue:44898", "reason": "Both are vision-related shape/position issues, but they affect different models and different internals (Perceiver interpolation vs Qwen2.5-VL rope indexing).", "right": "issue:45325"}, {"accept": false, "left": "issue:44291", "reason": "One is an init_empty_weights argument incompatibility, the other is strict config validation blocking granite_speech config loading; not the same code-path.", "right": "issue:44877"}, {"accept": false, "left": "issue:34634", "reason": "Completely different areas: BarkProcessor voice_preset handling versus progressive generation with inputs_embeds and 
past_key_values.", "right": "issue:35707"}], "summary": "These are mostly unrelated issue reports across audio preprocessing, loading, tokenization, offloading, and vision/model-specific bugs. None of the soft pairs look like the same concrete underlying defect, so they should stay separate."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43792|issue:43957", "issue:43526|issue:43883", "issue:44186|issue:45072", "issue:43881|issue:44991", "issue:43317|issue:43873", "issue:43262|issue:43746", "issue:44898|issue:45325", "issue:44291|issue:44877", "issue:34634|issue:35707"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7062, "estimated_input_tokens": 3403, "item_count": 18, "node_count": 18, "serialized_chars": 13610, "soft_pair_count": 11}, "cached_at": "2026-04-14T12:39:31Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "d4e362f7e034298cbd32a9f9f48f043c18408846643ff45ef798a0d34795e61b", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:35707", "issue:39692", "issue:40444", "issue:41720", "issue:43296", "issue:43377", "issue:43550", "issue:43632", "issue:43720", "issue:43749", "issue:43792", "issue:44164", "issue:44534", "issue:44610", "issue:44683", "issue:44877", "issue:44898", "issue:44991"], "result": {"analyst_result": {"best_issue_reason": "Issue 43632 is the most general and central among the listed issues; it describes a core Transformers v5 regression with broader downstream impact than the other, more model-specific reports.", "best_pr_reason": null, "canonical_issue_reason": "If one issue must represent the cluster, issue 43632 is the broadest framework-level 
regression and the closest to a reusable umbrella for related loading/initialization breakage, though the cluster overall is not a true duplicate set.", "canonical_pr_reason": null, "confidence": 0.79, "soft_edge_verdicts": [{"accept": false, "left": "issue:43550", "reason": "Both involve model/runtime failures, but one is about Bamba SDPA with torch.compile and the other about BitNet packed weights during accelerate loading; different code paths and fixes.", "right": "issue:43720"}, {"accept": false, "left": "issue:43632", "reason": "Both touch loading/saving behavior, but one is about the `_is_hf_initialized` flag in v5 and the other about preserving `extra_state`; related area, not the same bug.", "right": "issue:44164"}, {"accept": false, "left": "issue:35707", "reason": "Progressive generation with `inputs_embeds`/`past_key_values` is unrelated to Qwen3 auto device mapping triggering cudaErrorAssert.", "right": "issue:41720"}, {"accept": false, "left": "issue:44610", "reason": "Both concern vision input sizing, but they affect different models and failures: processor/model size mismatch vs Perceiver non-default-resolution interpolation.", "right": "issue:44898"}, {"accept": false, "left": "issue:43296", "reason": "Both are model-loading failures, but one is PaddleOCR-VL in vLLM and the other is tokenizer loading for EMBEDDIA/est-roberta; different components.", "right": "issue:44991"}, {"accept": false, "left": "issue:43296", "reason": "PaddleOCR-VL load failure and strict config rejecting `granite_speech` are separate compatibility issues with different failure modes.", "right": "issue:44877"}, {"accept": false, "left": "issue:43377", "reason": "Missing padding-mask support in MIMI encoder batched inference is not the same bug as OmDet-Turbo producing 640px inputs when 224px is expected.", "right": "issue:44610"}, {"accept": false, "left": "issue:43377", "reason": "These are both shape/consistency bugs, but they occur in different models and pipelines; no 
shared underlying code-path is evident.", "right": "issue:44898"}, {"accept": false, "left": "issue:43792", "reason": "Whisper runtime failure and compiled flex_attention breaking on torch>=2.9 are different problems with different triggers and fixes.", "right": "issue:44683"}, {"accept": false, "left": "issue:43749", "reason": "FSDP CPU RAM efficient loading and non-persistent buffer corruption are separate v5/loading regressions, not one concrete bug.", "right": "issue:44534"}, {"accept": false, "left": "issue:39692", "reason": "A SigLIP2 docs example with model/processor mismatch and quantization issues is not the same as Qwen2.5-VL finetuning with multi-image iterable datasets.", "right": "issue:40444"}], "summary": "This cluster is very heterogeneous: most items are distinct model-specific bugs, loading regressions, or documentation issues that only share broad transformer-related wording. None of the soft pairs look like true duplicates of the same underlying bug or change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43550|issue:43720", "issue:43632|issue:44164", "issue:35707|issue:41720", "issue:44610|issue:44898", "issue:43296|issue:44991", "issue:43296|issue:44877", "issue:43377|issue:44610", "issue:43377|issue:44898", "issue:43792|issue:44683", "issue:43749|issue:44534", "issue:39692|issue:40444"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6912, "estimated_input_tokens": 3328, "item_count": 17, "node_count": 17, "serialized_chars": 13312, "soft_pair_count": 11}, "cached_at": "2026-04-14T12:39:46Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": 
"761050d527113d596d1c6816f9cb39120bcc9d58fedda3f5bdd355257f6527e0", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:41720", "issue:43322", "issue:43404", "issue:43454", "issue:43526", "issue:43550", "issue:43606", "issue:43720", "issue:43881", "issue:44186", "issue:44423", "issue:44466", "issue:44610", "issue:44977", "issue:45072", "issue:45357", "issue:45406"], "result": {"analyst_result": {"best_issue_reason": "Issue 44423 is the best single representative because it describes a broadly impactful multimodal serve crash with a concrete error and wider user-facing surface than the more model-specific reports.", "best_pr_reason": null, "canonical_issue_reason": "No clear duplicate cluster exists here. If one issue must represent the set, issue 44423 is the broadest runtime-serving regression and is the most generally useful entry to inspect first.", "canonical_pr_reason": null, "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "issue:44466", "reason": "Both are serialization-related, but one is about tied lm_head weights while the other is about incorrect visual encoder keys; different code paths and failure modes.", "right": "issue:45357"}, {"accept": false, "left": "issue:43550", "reason": "Different models and different bugs: torch.compile/SDPA failure versus CPU offload device mismatch.", "right": "issue:43606"}, {"accept": false, "left": "issue:43454", "reason": "AyaVision tied-weight generation bug and bfloat16 dtype mismatch in other models are not the same underlying defect.", "right": "issue:45072"}, {"accept": false, "left": "issue:44423", "reason": "Both hit transformers serve, but the crashes involve different processors/attributes and likely different initialization paths; too different to merge.", "right": "issue:45406"}, {"accept": false, "left": "issue:43454", "reason": "A tied lm_head bug is unrelated to a torch.compile SDPA failure.", "right": "issue:43550"}, {"accept": false, "left": "issue:44186", "reason": 
"Tokenizer crashes on NER/padding are unrelated to OmDet-Turbo processor image-size mismatch.", "right": "issue:44610"}, {"accept": false, "left": "issue:41720", "reason": "Qwen3 auto device mapping cudaErrorAssert and Llava Next segmentation fault are different model-loading failures.", "right": "issue:43322"}, {"accept": false, "left": "issue:43404", "reason": "Mistral3 lm_head tying is a generation-weight issue; incorrect visual encoder save keys are a serialization schema bug.", "right": "issue:45357"}, {"accept": false, "left": "issue:41720", "reason": "Both mention Qwen3/Qwen3.5, but one is an auto device-mapping CUDA assert and the other is a flash-attention generation bug.", "right": "issue:44977"}, {"accept": false, "left": "issue:43526", "reason": "BeitImageProcessorFast label reduction and glm-4v-9b load failure are unrelated processor/model issues.", "right": "issue:43881"}, {"accept": false, "left": "issue:43454", "reason": "AyaVision tied weights and BitNet packed-weight unpacking are distinct mechanisms and cannot plausibly be one PR.", "right": "issue:43720"}], "summary": "These issues are mostly unrelated regressions across different models and subsystems; none of the soft-edge pairs look like the same underlying bug, so all should stay separate."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44466|issue:45357", "issue:43550|issue:43606", "issue:43454|issue:45072", "issue:44423|issue:45406", "issue:43454|issue:43550", "issue:44186|issue:44610", "issue:41720|issue:43322", "issue:43404|issue:45357", "issue:41720|issue:44977", "issue:43526|issue:43881", "issue:43454|issue:43720"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6808, "estimated_input_tokens": 3276, "item_count": 18, "node_count": 18, "serialized_chars": 13101, "soft_pair_count": 9}, "cached_at": "2026-04-14T12:39:55Z", "key": 
{"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "cddfaa0f1d78bc0dddf8cb60ad6efb5e1c97eb70c3ed22eb86f3f64f07a0f417", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:34567", "issue:40990", "issue:42617", "issue:42915", "issue:43377", "issue:43577", "issue:43606", "issue:43708", "issue:43720", "issue:43873", "issue:44186", "issue:44361", "issue:44479", "issue:44756", "issue:44811", "issue:44877", "issue:44918", "issue:44991"], "result": {"analyst_result": {"best_issue_reason": "None of the issues is a good global representative for the cluster; they are too heterogeneous to collapse into one canonical issue.", "best_pr_reason": null, "canonical_issue_reason": "No single canonical issue fits this cluster because the issues describe different root causes and code paths rather than one shared bug.", "canonical_pr_reason": null, "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "issue:34567", "reason": "Trainer token-count bookkeeping is unrelated to a GPT-OSS perplexity regression on WikiText-2.", "right": "issue:40990"}, {"accept": false, "left": "issue:43873", "reason": "Both involve offloading/memory, but one is about quantization behavior and the other is a Strix Halo mmap OOM workaround; different failure modes and fixes.", "right": "issue:44756"}, {"accept": false, "left": "issue:43377", "reason": "MIMI encoder padding-mask batching bug and Whisper batch_decode skip_special_tokens bug are separate model/component-specific issues.", "right": "issue:44811"}, {"accept": false, "left": "issue:43577", "reason": "BLIP2 dtype loading mismatch and Trainer resume/max_steps recalculation are unrelated subsystems with different root causes.", "right": "issue:43708"}, {"accept": false, "left": "issue:44186", 
"reason": "LayoutLMv2 tokenizer crashes on NER/batching and MLukeTokenizer AttributeError are distinct tokenizer bugs.", "right": "issue:44361"}, {"accept": false, "left": "issue:44479", "reason": "A video-input regression across Qwen VL models is not the same as Qwen3.5 embedding unpacking with TRL SFT trainer.", "right": "issue:44918"}, {"accept": false, "left": "issue:42617", "reason": "Failure to run 3d_parallel.py and Qwen3Moe FineGrainedFP8Config failure are different execution/model-configuration problems.", "right": "issue:42915"}, {"accept": false, "left": "issue:44877", "reason": "Strict config blocking granite_speech loading and tokenizer loading failure for EMBEDDIA/est-roberta are separate config vs tokenizer issues.", "right": "issue:44991"}, {"accept": false, "left": "issue:43606", "reason": "CPU-offload device mismatch in bark-small and BitNet packed-weight unpacking during accelerate loading are different model-loading bugs.", "right": "issue:43720"}], "summary": "The cluster is not a duplicate set: it mixes unrelated bugs in training state, perplexity, script execution, model loading/offload, tokenizer behavior, quantization, and hardware-specific memory issues. 
All suggested soft links should be rejected."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:34567|issue:40990", "issue:43873|issue:44756", "issue:43377|issue:44811", "issue:43577|issue:43708", "issue:44186|issue:44361", "issue:44479|issue:44918", "issue:42617|issue:42915", "issue:44877|issue:44991", "issue:43606|issue:43720"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6590, "estimated_input_tokens": 3167, "item_count": 17, "node_count": 17, "serialized_chars": 12667, "soft_pair_count": 10}, "cached_at": "2026-04-14T12:40:19Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "400b8e1ff2dd70c6836a79ac6dff010a62e70b966d253dd69ff9866a90c42a83", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:36331", "issue:42617", "issue:42915", "issue:42994", "issue:43316", "issue:43317", "issue:43482", "issue:43531", "issue:43550", "issue:43572", "issue:43606", "issue:43653", "issue:44671", "issue:44743", "issue:44898", "issue:44913", "issue:45072"], "result": {"analyst_result": {"best_issue_reason": "issue:45072 is the strongest standalone report in this set: it is a concrete CI/inference bug, has multiple inbound references, and describes a specific dtype-mismatch failure mode.", "best_pr_reason": null, "canonical_issue_reason": "No canonical issue: the issues are heterogeneous across trainer API, config serialization, quantization/loading, offload, tokenizer, and inference bugs.", "canonical_pr_reason": null, "confidence": 0.95, "soft_edge_verdicts": [{"accept": false, "left": "issue:44671", "reason": "Different bugs and code paths: CamemBERT masked-LM prediction error vs Qwen3.5 
recurrent-state reset in modular_qwen3_5.py.", "right": "issue:44743"}, {"accept": false, "left": "issue:42994", "reason": "Both involve quantization/loading, but one is saving a quantized model and the other is GGUF loading under transformers v5; not the same failure.", "right": "issue:43482"}, {"accept": false, "left": "issue:36331", "reason": "Trainer compute_loss kwarg mismatch is unrelated to a Qwen3-MoE FineGrainedFP8Config failure.", "right": "issue:42915"}, {"accept": false, "left": "issue:42915", "reason": "Different root causes: FP8 config failure for Qwen3-MoE vs dequantized model load failure with device_map=auto offload.", "right": "issue:43317"}, {"accept": false, "left": "issue:43316", "reason": "Both are config/API issues, but they concern different model configs and different fields (Gemma3TextConfig vs GPTNeoX rotary_pct reload).", "right": "issue:44913"}, {"accept": false, "left": "issue:36331", "reason": "Unexpected kwarg in CustomTrainer.compute_loss is unrelated to failure to run 3d_parallel.py.", "right": "issue:42617"}, {"accept": false, "left": "issue:43550", "reason": "Bamba torch.compile/SDPA inference bug is unrelated to BigBirdTokenizer mask-token registration and decode output.", "right": "issue:43653"}, {"accept": false, "left": "issue:44898", "reason": "Perceiver non-default-resolution failure is a different bug from SwitchTransformers/TimmWrapperModel bfloat16 dtype mismatches.", "right": "issue:45072"}, {"accept": false, "left": "issue:43531", "reason": "Qwen3-MoE sliding_window behavior and StableLmConfig pad_token_idx regression are different model/config issues.", "right": "issue:43572"}, {"accept": false, "left": "issue:43606", "reason": "CPU-offload device mismatch in bark-small is unrelated to BigBirdTokenizer special-token registration.", "right": "issue:43653"}], "summary": "The soft pairs are all false positives: they point to different models, different code paths, or different failure modes rather than the same 
underlying bug. There is no single canonical duplicate issue in this set."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44671|issue:44743", "issue:42994|issue:43482", "issue:36331|issue:42915", "issue:42915|issue:43317", "issue:43316|issue:44913", "issue:36331|issue:42617", "issue:43550|issue:43653", "issue:44898|issue:45072", "issue:43531|issue:43572", "issue:43606|issue:43653"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6608, "estimated_input_tokens": 3176, "item_count": 17, "node_count": 17, "serialized_chars": 12704, "soft_pair_count": 10}, "cached_at": "2026-04-14T12:40:37Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "afba4bc2d0475e6e5b02ce9cb977622f4b4da5c239ec2be5bbf95d1c7044237d", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:33357", "issue:35707", "issue:36010", "issue:41720", "issue:42617", "issue:42915", "issue:43322", "issue:43388", "issue:43540", "issue:43782", "issue:43873", "issue:44292", "issue:44479", "issue:44514", "issue:44545", "issue:45059", "issue:45161"], "result": {"analyst_result": {"best_issue_reason": "44514 is the strongest issue to surface globally because it is the only clear duplicate target in the cluster and has the richest thread for that exact bug. It is not a good representative for the whole cluster, only for that duplicate pair.", "best_pr_reason": null, "canonical_issue_reason": "Issue 44514 is the best canonical anchor for the only true duplicate pair: it is earlier, has much higher discussion/inbound activity, and the title matches 44545 exactly. 
The rest of the cluster is unrelated enough that it should not be collapsed into this canonical.", "canonical_pr_reason": null, "confidence": 0.91, "soft_edge_verdicts": [{"accept": false, "left": "issue:44292", "reason": "Different bugs: one is a Qwen-3-8B-NVFP4 runtime error, the other is a video-input regression across several Qwen VL models.", "right": "issue:44479"}, {"accept": false, "left": "issue:41720", "reason": "Both mention model-parallel/device-mapping issues, but they describe different models and failure modes (cudaErrorAssert vs only TP not working).", "right": "issue:45161"}, {"accept": false, "left": "issue:41720", "reason": "Different concrete failures: Qwen3 auto device mapping on A800 versus an inability to run 3d_parallel.py.", "right": "issue:42617"}, {"accept": false, "left": "issue:43388", "reason": "Unrelated subsystems and symptoms: gather_for_metrics label truncation versus SAM3 text/bbox behavior.", "right": "issue:45059"}, {"accept": false, "left": "issue:43540", "reason": "Different code paths and errors: Qwen3OmniMoe video-input ValueError versus Qwen3VL from_pretrained weight_only loading failure.", "right": "issue:43782"}, {"accept": false, "left": "issue:42915", "reason": "These are distinct model-loading/runtime problems affecting different models and likely different code paths.", "right": "issue:43322"}, {"accept": false, "left": "issue:41720", "reason": "Too broad to merge: one is a specific A800 assert during auto device mapping, the other is an offloading/quantization behavior report.", "right": "issue:43873"}, {"accept": false, "left": "issue:36010", "reason": "Completely different issues: GenerationMixin import failure versus a 3d_parallel.py execution problem.", "right": "issue:42617"}, {"accept": true, "left": "issue:44514", "reason": "Same exact bug and same title: Qwen2_5_VLProcessor.apply_chat_template crashes on batched input when padding=False.", "right": "issue:44545"}, {"accept": false, "left": "issue:33357", 
"reason": "Unrelated failures: MacOS bus error with a CLIP model versus progressive generation using inputs_embeds and past_key_values.", "right": "issue:35707"}], "summary": "This cluster is mostly heterogeneous and does not represent one underlying bug. The only strong duplicate pair is the two Qwen2_5_VLProcessor.apply_chat_template crash reports (44514/44545)."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44292|issue:44479", "issue:41720|issue:45161", "issue:41720|issue:42617", "issue:43388|issue:45059", "issue:43540|issue:43782", "issue:42915|issue:43322", "issue:41720|issue:43873", "issue:36010|issue:42617", "issue:44514|issue:44545", "issue:33357|issue:35707"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6870, "estimated_input_tokens": 3307, "item_count": 18, "node_count": 18, "serialized_chars": 13227, "soft_pair_count": 9}, "cached_at": "2026-04-14T12:40:54Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "0f11d0329b9cef20380b62bae7ef429ce86e0b11fad5a037ba609bd72604cf8e", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:30064", "issue:34634", "issue:41720", "issue:43381", "issue:43606", "issue:43653", "issue:43720", "issue:43746", "issue:43782", "issue:43844", "issue:44368", "issue:44451", "issue:44492", "issue:44509", "issue:44991", "issue:45072", "issue:45357", "issue:45381"], "result": {"analyst_result": {"best_issue_reason": "No single issue is globally suitable as a duplicate anchor because the cluster spans unrelated bugs and documentation cleanup, so any canonical choice would be arbitrary.", "best_pr_reason": null, "canonical_issue_reason": "No 
canonical issue emerges; the set is heterogeneous and the listed pairs do not describe the same bug or change.", "canonical_pr_reason": null, "confidence": 0.98, "soft_edge_verdicts": [{"accept": false, "left": "issue:30064", "reason": "Different processors and failures: void segmentation maps in Image Processor vs BarkProcessor voice_preset handling.", "right": "issue:34634"}, {"accept": false, "left": "issue:41720", "reason": "Unrelated problems: auto device mapping CUDA assert on Qwen3 vs GraniteSpeech PEFT/local checkpoint loading.", "right": "issue:43746"}, {"accept": false, "left": "issue:44492", "reason": "Both are docs-related, but one is a cache-strategy typo and the other is stale pipeline-task documentation; not the same change.", "right": "issue:44509"}, {"accept": false, "left": "issue:43653", "reason": "Tokenizer special-token registration vs BitNet packed-weight unpacking are different subsystems and fixes.", "right": "issue:43720"}, {"accept": false, "left": "issue:44368", "reason": "Both mention Qwen3.5, but one is a training warning about tie_word_embeddings and the other is a save_pretrained visual-encoder key regression.", "right": "issue:45357"}, {"accept": false, "left": "issue:43381", "reason": "Gradient checkpointing in eval mode is unrelated to gradient growth under DeepSpeed ZeRO-3.", "right": "issue:43844"}, {"accept": false, "left": "issue:43606", "reason": "Both are inference/device-dtype mismatches, but for different models and concrete failures; not the same bug path.", "right": "issue:45072"}, {"accept": false, "left": "issue:43782", "reason": "Different Qwen VL issues: weight_only load error vs incorrect vision_position_ids for video input.", "right": "issue:45381"}, {"accept": false, "left": "issue:44451", "reason": "Both are tokenizer/model loading regressions, but they affect different models and likely different compatibility issues.", "right": "issue:44991"}], "summary": "The soft pairs are all false positives. 
They share only broad surface themes like processors, loading errors, docs, or model-family names, but they describe different code paths and would not plausibly be merged as one fix."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:30064|issue:34634", "issue:41720|issue:43746", "issue:44492|issue:44509", "issue:43653|issue:43720", "issue:44368|issue:45357", "issue:43381|issue:43844", "issue:43606|issue:45072", "issue:43782|issue:45381", "issue:44451|issue:44991"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6938, "estimated_input_tokens": 3341, "item_count": 18, "node_count": 18, "serialized_chars": 13362, "soft_pair_count": 10}, "cached_at": "2026-04-14T12:41:23Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "1e0685943967ea51a4829b8e4feddd037f589da94a3fa2a58de40a68235d5c5f", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:34567", "issue:34634", "issue:35707", "issue:41720", "issue:42886", "issue:43421", "issue:43550", "issue:43720", "issue:43819", "issue:43927", "issue:43994", "issue:44062", "issue:44361", "issue:44451", "issue:44671", "issue:44843", "issue:44918", "issue:44936"], "result": {"analyst_result": {"best_issue_reason": "issue:44843 is the strongest overall representative: specific repro, explicit failure path, and it appears to subsume the broader symptom report in issue:42886.", "best_pr_reason": null, "canonical_issue_reason": "issue:44843 is the best canonical issue here because it identifies the concrete root cause in `_patch_mistral_regex` and directly explains the offline failure reported more broadly in issue:42886.", "canonical_pr_reason": null, 
"confidence": 0.88, "soft_edge_verdicts": [{"accept": false, "left": "issue:44062", "reason": "Both are tokenizer-related, but one is an AddedToken construction error and the other is a model-loading failure for ScandiBERT; different root causes and fixes.", "right": "issue:44451"}, {"accept": false, "left": "issue:44918", "reason": "Both mention trainers, but one is a Qwen3.5 inputs_embeds/past_key_values unpacking problem and the other is a generic train-then-evaluate failure; not the same bug.", "right": "issue:44936"}, {"accept": false, "left": "issue:43550", "reason": "Different subsystems and failure modes: Bamba torch.compile/SDPA vs MLukeTokenizer task-time AttributeError.", "right": "issue:44361"}, {"accept": false, "left": "issue:43550", "reason": "Unrelated bugs: Bamba compile/SDPA vs DiaConfig save/load losing custom token IDs.", "right": "issue:43927"}, {"accept": false, "left": "issue:43994", "reason": "Both are model output correctness complaints, but they target different models and likely different code paths.", "right": "issue:44671"}, {"accept": false, "left": "issue:34634", "reason": "BarkProcessor voice_preset and Qwen3 auto device mapping cudaErrorAssert are unrelated.", "right": "issue:41720"}, {"accept": true, "left": "issue:42886", "reason": "These describe the same offline tokenizer regression: 44843 pinpoints the unconditional `model_info()` call in `_patch_mistral_regex`, which matches the broader HF_HUB_OFFLINE cache-load failure in 42886.", "right": "issue:44843"}, {"accept": false, "left": "issue:43421", "reason": "Both involve token handling, but one is runtime post-processor updates and the other is save/load of custom token IDs causing generation errors; distinct bugs.", "right": "issue:43927"}, {"accept": false, "left": "issue:34567", "reason": "TrainerState token-count tracking and progressive generation with inputs_embeds/past_key_values are different problems.", "right": "issue:35707"}, {"accept": false, "left": "issue:43720", 
"reason": "Different model-specific issues: BitNet packed-weight loading vs DAC from_latents/forward mismatch due to missing STE.", "right": "issue:43819"}], "summary": "This cluster is mostly heterogeneous and does not form a single duplicate set. The only strong overlap is the HF_HUB_OFFLINE tokenizer regression pair (42886/44843); 44843 is the clearest representative because it names the concrete failing code path. All other soft pairs look like distinct bugs in different models/components."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44062|issue:44451", "issue:44918|issue:44936", "issue:43550|issue:44361", "issue:43550|issue:43927", "issue:43994|issue:44671", "issue:34634|issue:41720", "issue:42886|issue:44843", "issue:43421|issue:43927", "issue:34567|issue:35707", "issue:43720|issue:43819"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7198, "estimated_input_tokens": 3471, "item_count": 18, "node_count": 18, "serialized_chars": 13884, "soft_pair_count": 11}, "cached_at": "2026-04-14T12:41:40Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "6bb509ad02d2a8de81f9fccd4ca15386b9ff238069a321d3d2c89c63eadc4c11", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:39692", "issue:43421", "issue:43525", "issue:43531", "issue:43596", "issue:43653", "issue:44077", "issue:44186", "issue:44220", "issue:44589", "issue:44610", "issue:44625", "issue:44898", "issue:44987", "issue:45042", "issue:45072", "issue:45081", "issue:45356"], "result": {"analyst_result": {"best_issue_reason": "No clear best representative issue: the cluster spans multiple unrelated failures, so selecting one 
would be arbitrary.", "best_pr_reason": null, "canonical_issue_reason": "No single canonical issue fits this set; the issues are about different code paths and products rather than one underlying bug.", "canonical_pr_reason": null, "confidence": 0.95, "soft_edge_verdicts": [{"accept": false, "left": "issue:43531", "reason": "Different failures: Qwen3-MoE sliding-window behavior vs a Float8 storage lookup TypeError.", "right": "issue:44589"}, {"accept": false, "left": "issue:44987", "reason": "Unrelated problems: loading a specific model checkpoint vs PIL image processors incorrectly requiring torchvision.", "right": "issue:45042"}, {"accept": false, "left": "issue:44077", "reason": "One is a PatchTSMixer post-init policy issue; the other is Qwen3.5 num_labels propagation in config.", "right": "issue:44625"}, {"accept": false, "left": "issue:43421", "reason": "Both involve tokenizers, but one is runtime post-processor sync and the other is BigBird mask-token registration/decode behavior.", "right": "issue:43653"}, {"accept": false, "left": "issue:43421", "reason": "Different tokenizer bugs: special-token/post-processor updates vs LayoutLMv2 NER and padding/truncation crashes.", "right": "issue:44186"}, {"accept": false, "left": "issue:44186", "reason": "Different models and code paths: LayoutLMv2 tokenizer crashes vs Perceiver image-classification resize/interpolation failure.", "right": "issue:44898"}, {"accept": false, "left": "issue:45081", "reason": "Both touch tokenizer regex handling, but the concrete bugs differ: an AttributeError in patching vs codec handling and warning regression.", "right": "issue:45356"}, {"accept": false, "left": "issue:39692", "reason": "One is a SigLIP2 docs example error; the other is an OmDet-Turbo processor/model input-size mismatch.", "right": "issue:44610"}, {"accept": false, "left": "issue:44610", "reason": "Different domains and symptoms: image processor resolution mismatch vs bfloat16 dtype mismatch in inference.", "right": 
"issue:45072"}, {"accept": false, "left": "issue:43596", "reason": "Unrelated runtime errors: Deepspeed ZeRO3/BertModel indexing vs a `_torch_extract_fbank_features()` issue.", "right": "issue:44220"}, {"accept": false, "left": "issue:43525", "reason": "Different root causes: missing `pad_token_id` on Llama4Config vs Qwen3-MoE sliding_window behavior.", "right": "issue:43531"}], "summary": "This cluster is heterogeneous: the items cover unrelated tokenizer, model/config, image processor, audio feature, dtype, and docs bugs. None of the soft pairs are strong enough to merge as duplicates, so there is no single canonical issue/PR for the group."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43531|issue:44589", "issue:44987|issue:45042", "issue:44077|issue:44625", "issue:43421|issue:43653", "issue:43421|issue:44186", "issue:44186|issue:44898", "issue:45081|issue:45356", "issue:39692|issue:44610", "issue:44610|issue:45072", "issue:43596|issue:44220", "issue:43525|issue:43531"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6764, "estimated_input_tokens": 3254, "item_count": 17, "node_count": 17, "serialized_chars": 13013, "soft_pair_count": 10}, "cached_at": "2026-04-14T12:42:05Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "7bc8da7572dcc185e577d1fc5f2e9e4ac0c0d2c440f58f0d5b936df5e70c78bd", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:39692", "issue:40444", "issue:43540", "issue:43550", "issue:43618", "issue:43653", "issue:43697", "issue:43716", "issue:43746", "issue:43819", "issue:43873", "issue:44008", "issue:44220", "issue:44661", "issue:44843", "issue:45081", 
"issue:45381"], "result": {"analyst_result": {"best_issue_reason": "45081 is the best single representative if one must be chosen, because it pinpoints a concrete runtime crash in a specific code path with a direct repro.", "best_pr_reason": null, "canonical_issue_reason": "Issue 45081 is the clearest, most concrete report in the only near-related subcluster (_patch_mistral_regex) and has the most actionable crash reproduction.", "canonical_pr_reason": null, "confidence": 0.67, "soft_edge_verdicts": [{"accept": false, "left": "issue:43746", "reason": "Different models and different failures: PEFT/local checkpoint loading vs Qwen2.5-VL video position IDs.", "right": "issue:45381"}, {"accept": false, "left": "issue:43540", "reason": "Both are multimodal, but one is Qwen video batching with IterableDataset and the other is a Gemma3n audio mask attribute collision.", "right": "issue:44008"}, {"accept": false, "left": "issue:43653", "reason": "Unrelated components: BigBirdTokenizer special-token decoding vs DAC latent/forward mismatch.", "right": "issue:43819"}, {"accept": false, "left": "issue:44220", "reason": "Audio feature extraction bug vs tokenizer-mapping failure in add-new-model-like; different subsystems and symptoms.", "right": "issue:44661"}, {"accept": false, "left": "issue:44843", "reason": "Both touch _patch_mistral_regex, but one is an offline Hub lookup problem and the other is a tokenizer attribute crash; distinct failure modes, not a single duplicate bug.", "right": "issue:45081"}, {"accept": false, "left": "issue:43716", "reason": "Different model families and root causes: Mistral-3 image dtype mismatch vs Gemma3n variable-name collision.", "right": "issue:44008"}, {"accept": false, "left": "issue:39692", "reason": "SigLIP2 doc example errors are unrelated to quantization/offloading behavior.", "right": "issue:43873"}, {"accept": false, "left": "issue:43550", "reason": "Bamba torch.compile/SDPA issue is unrelated to DAC.from_latents vs forward 
mismatch.", "right": "issue:43819"}, {"accept": false, "left": "issue:40444", "reason": "Qwen2.5-VL iterable dataset multi-image failure is unrelated to GraniteSpeech PEFT checkpoint loading.", "right": "issue:43746"}, {"accept": false, "left": "issue:43618", "reason": "CLIPOutput attentions regression and RTDetrV2 output divergence are different model-output bugs with different code paths.", "right": "issue:43697"}], "summary": "The set is mostly heterogeneous; most pairs are clearly different bugs in different models/components. The only close thematic overlap is the Mistral tokenizer regex helper, but even that looks like two separate failure modes rather than one duplicate bug."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43746|issue:45381", "issue:43540|issue:44008", "issue:43653|issue:43819", "issue:44220|issue:44661", "issue:44843|issue:45081", "issue:43716|issue:44008", "issue:39692|issue:43873", "issue:43550|issue:43819", "issue:40444|issue:43746", "issue:43618|issue:43697"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6564, "estimated_input_tokens": 3154, "item_count": 17, "node_count": 17, "serialized_chars": 12615, "soft_pair_count": 9}, "cached_at": "2026-04-14T12:42:20Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "3bbd3656858b42e3b3dc1db7b7c8afc9a38c633e960fce7f11e17f4243208e39", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:30064", "issue:36010", "issue:43335", "issue:43421", "issue:43540", "issue:43550", "issue:43720", "issue:43746", "issue:43756", "issue:43873", "issue:44062", "issue:44186", "issue:44442", "issue:44488", "issue:44811", 
"issue:45072", "issue:45381"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.95, "soft_edge_verdicts": [{"accept": false, "left": "issue:44186", "reason": "Both are tokenizer-related, but they affect different models and failure modes: LayoutLMv2 NER/padding crashes vs AutoTokenizer loading FastSpeech2ConformerTokenizer.", "right": "issue:44442"}, {"accept": false, "left": "issue:43746", "reason": "One is PEFT/local checkpoint loading for GraniteSpeech, the other is quantization/offloading behavior; same broad loading area but not the same bug.", "right": "issue:43873"}, {"accept": false, "left": "issue:43335", "reason": "Both concern model configuration, but they are unrelated model-specific architecture mistakes (SwitchTransformers sparse layers vs Smollm3 RoPE layer handling).", "right": "issue:43756"}, {"accept": false, "left": "issue:44062", "reason": "The first is a tokenizer AddedToken argument error; the second is a model/tokenizer load failure for cjvt/sleng-bert. 
Similar surface area, different root causes.", "right": "issue:44488"}, {"accept": false, "left": "issue:43720", "reason": "Different subsystems and symptoms: BitNet packed-weight loading vs dtype mismatch in bfloat16 inference.", "right": "issue:45072"}, {"accept": false, "left": "issue:43550", "reason": "Different concrete failures: torch.compile/SDPA on Bamba-9B-v2 vs bfloat16 dtype mismatch in SwitchTransformers/TimmWrapperModel.", "right": "issue:45072"}, {"accept": false, "left": "issue:30064", "reason": "Completely unrelated: image processor segmentation-map handling vs a GenerationMixin import error.", "right": "issue:36010"}, {"accept": false, "left": "issue:43421", "reason": "Both touch tokenizer/processor behavior, but one is runtime special-token post-processing updates and the other is Whisper batch_decode skip_special_tokens handling.", "right": "issue:44811"}, {"accept": false, "left": "issue:43540", "reason": "Both mention video inputs in multimodal models, but they are different model families and likely different position-id code paths.", "right": "issue:45381"}], "summary": "This cluster is a loose collection of distinct bug reports around tokenization, model loading, multimodal/video inputs, and inference/runtime issues. 
None of the soft pairs look like the same underlying bug/change, so I would not merge any of them as duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44186|issue:44442", "issue:43746|issue:43873", "issue:43335|issue:43756", "issue:44062|issue:44488", "issue:43720|issue:45072", "issue:43550|issue:45072", "issue:30064|issue:36010", "issue:43421|issue:44811", "issue:43540|issue:45381"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7016, "estimated_input_tokens": 3380, "item_count": 18, "node_count": 18, "serialized_chars": 13520, "soft_pair_count": 10}, "cached_at": "2026-04-14T12:42:36Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "fdd92c220cfbe7e6d9a2261fdba9fc783bf1bf934e12d8dbebad94a5291a63ec", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:43531", "issue:43577", "issue:43582", "issue:43643", "issue:43644", "issue:43749", "issue:43782", "issue:43818", "issue:43825", "issue:43994", "issue:44448", "issue:44561", "issue:44671", "issue:44877", "issue:44977", "issue:45020", "issue:45081", "issue:45406"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.95, "soft_edge_verdicts": [{"accept": false, "left": "issue:43577", "reason": "Blip2 dtype-casting bug vs Video-LLaVA missing temporal attention / shared weights; different models, different failure modes, no shared code-path evidence.", "right": "issue:43818"}, {"accept": false, "left": "issue:44561", "reason": "Both mention trust_remote_code regressions, but one is specifically removal of 
is_torch_fx_available and the other is a broader remote_code breakage report across recent versions.", "right": "issue:45020"}, {"accept": false, "left": "issue:43643", "reason": "AutoConfig returning missing fields with trust_remote_code=True is a different problem from strict config rejection of granite_speech; one is object completeness, the other is validation strictness.", "right": "issue:44877"}, {"accept": false, "left": "issue:45081", "reason": "Different crashes in different components: Mistral tokenizer regex patching vs Gemma4Processor missing _tokenizer in serve.", "right": "issue:45406"}, {"accept": false, "left": "issue:44448", "reason": "Both are model-output regressions in v5, but they affect different models and likely distinct inference paths; not the same concrete bug.", "right": "issue:44671"}, {"accept": false, "left": "issue:43782", "reason": "Qwen3VL loading with weight_only=True vs Qwen3.5 generation with flash-attention are separate issues in different model/runtime paths.", "right": "issue:44977"}, {"accept": false, "left": "issue:43531", "reason": "Sliding_window behavior in Qwen3-MoE is unrelated to AutoConfig missing fields under trust_remote_code.", "right": "issue:43643"}, {"accept": false, "left": "issue:43577", "reason": "Blip2 dtype handling and Apple Silicon caching_allocator_warmup TypeError are unrelated bugs in different subsystems.", "right": "issue:43582"}, {"accept": false, "left": "issue:43644", "reason": "Both are loading-related regressions, but one is about non-persistent buffers being initialized with junk and the other about FSDP_CPU_RAM_EFFICIENT_LOADING failing; not the same code defect.", "right": "issue:43749"}, {"accept": false, "left": "issue:43825", "reason": "Pipeline translation-message wording is a UI/help-text issue, while SigLIP2 nonsensical outputs is a model/pipeline correctness issue.", "right": "issue:43994"}], "summary": "None of the proposed soft pairs look like the same underlying bug or change; they 
are related only by broad subsystem or model-family overlap. I would keep all of them separate and not assign a canonical issue/PR for the cluster."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43577|issue:43818", "issue:44561|issue:45020", "issue:43643|issue:44877", "issue:45081|issue:45406", "issue:44448|issue:44671", "issue:43782|issue:44977", "issue:43531|issue:43643", "issue:43577|issue:43582", "issue:43644|issue:43749", "issue:43825|issue:43994"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6862, "estimated_input_tokens": 3303, "item_count": 18, "node_count": 18, "serialized_chars": 13209, "soft_pair_count": 9}, "cached_at": "2026-04-14T12:42:51Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "6e1b89f04d21ecfe947928aedcef5aecc521a85a7c31490446ff8ec588d603f3", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:30064", "issue:36331", "issue:41720", "issue:42175", "issue:43650", "issue:43720", "issue:43782", "issue:43819", "issue:43825", "issue:43827", "issue:43906", "issue:44361", "issue:44610", "issue:44805", "issue:44936", "issue:44991", "issue:45081", "issue:45325"], "result": {"analyst_result": {"best_issue_reason": "issue:43825 is the most suitable representative issue because it captures a specific regression with a clear reproduction and broader relevance than the adjacent docs-only report.", "best_pr_reason": null, "canonical_issue_reason": "issue:43825 is the best anchor only because it is a concrete, user-facing bug report in the closest related subgroup; the rest of the items are largely unrelated one-off issues.", "canonical_pr_reason": null, 
"confidence": 0.95, "soft_edge_verdicts": [{"accept": false, "left": "issue:43819", "reason": "Different code paths and symptoms: DAC latent conversion vs MLukeTokenizer task attribute errors.", "right": "issue:44361"}, {"accept": false, "left": "issue:44805", "reason": "Both are training/evaluation failures, but the mask shape mismatch and trainer.evaluate-after-train behavior are distinct bugs.", "right": "issue:44936"}, {"accept": false, "left": "issue:43650", "reason": "No shared underlying bug is evident; one is an empty/low-information issue, the other is a reproduction of an existing separate issue.", "right": "issue:43906"}, {"accept": false, "left": "issue:41720", "reason": "Unrelated failures: Qwen3 auto device mapping on A800 vs tokenizer loading for EMBEDDIA/est-roberta under v5.", "right": "issue:44991"}, {"accept": false, "left": "issue:43720", "reason": "Different subsystems and fixes: BitNet packed-weight loading vs OmDet-Turbo processor image-size mismatch.", "right": "issue:44610"}, {"accept": false, "left": "issue:42175", "reason": "Backend packaging/install issue is unrelated to the Mistral tokenizer regex patch crash.", "right": "issue:45081"}, {"accept": false, "left": "issue:43825", "reason": "Related topic area, but not the same bug: one is an incorrect runtime error message, the other is stale documentation still mentioning pipeline().", "right": "issue:43827"}, {"accept": false, "left": "issue:30064", "reason": "Completely different components and failures: image segmentation-map processing vs CustomTrainer.compute_loss signature mismatch.", "right": "issue:36331"}, {"accept": false, "left": "issue:43782", "reason": "Both involve vision-language models, but they describe different model-specific regressions with different failure mechanisms.", "right": "issue:45325"}], "summary": "This cluster is mostly a set of unrelated issues. 
The only near-match is the v5 pipeline/docs pair, but it\u2019s still a bug-vs-docs mismatch rather than the same underlying defect. No PRs are present."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43819|issue:44361", "issue:44805|issue:44936", "issue:43650|issue:43906", "issue:41720|issue:44991", "issue:43720|issue:44610", "issue:42175|issue:45081", "issue:43825|issue:43827", "issue:30064|issue:36331", "issue:43782|issue:45325"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6616, "estimated_input_tokens": 3180, "item_count": 17, "node_count": 17, "serialized_chars": 12719, "soft_pair_count": 9}, "cached_at": "2026-04-14T12:43:00Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "405ecf8340c06730a19b7223a7aa439738a2642babaf4035a80dc4e6c1b1eaac", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:34634", "issue:36010", "issue:43262", "issue:43526", "issue:43582", "issue:43653", "issue:43697", "issue:43749", "issue:43819", "issue:44060", "issue:44291", "issue:44368", "issue:44485", "issue:44509", "issue:44554", "issue:44671", "issue:44857"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "issue:44485", "reason": "Both mention v5-era behavior changes, but one is about GLM-5 RoPE implementation and the other is about removed pipeline docs; these are different topics and different fixes.", "right": "issue:44509"}, {"accept": false, "left": "issue:44060", "reason": "Both concern Qwen3/Qwen3.5 tied-weights warnings, 
but the first is a concrete incorrect tying bug across layers and the second is a user-facing warning during LoRA fine-tuning; not the same underlying change.", "right": "issue:44368"}, {"accept": false, "left": "issue:34634", "reason": "BarkProcessor voice_preset failure and GenerationMixin import error are unrelated failures in different code paths.", "right": "issue:36010"}, {"accept": false, "left": "issue:43582", "reason": "One is an Apple Silicon allocator warmup TypeError; the other is an AMP/CUDA crash in LwDetrImageLoss. Different backend and different bug.", "right": "issue:44857"}, {"accept": false, "left": "issue:43749", "reason": "FSDP CPU RAM efficient loading and init_empty_weights/_is_hf_initialized TypeError are both loading-related, but they point to different mechanisms and code paths.", "right": "issue:44291"}, {"accept": false, "left": "issue:43526", "reason": "BeitImageProcessorFast reduce_labels truncating labels is unrelated to RTDetrV2 output divergence in v5.", "right": "issue:43697"}, {"accept": false, "left": "issue:43653", "reason": "BigBirdTokenizer special-token registration/empty decode and CamemBERT masked-LM prediction correctness are different model/tokenizer-specific issues.", "right": "issue:44671"}, {"accept": false, "left": "issue:43526", "reason": "reduce_labels bug in BeitImageProcessorFast and the MPS attention correctness issue are distinct implementations with no shared code path.", "right": "issue:44554"}, {"accept": false, "left": "issue:43262", "reason": "Audio processor chat-template sampling-rate defaulting and DAC.from_latents/forward mismatch are unrelated audio bugs.", "right": "issue:43819"}], "summary": "This cluster is not a single duplicate set; it contains many unrelated bug reports spanning audio processors, tokenizers, model warnings, loading errors, docs, and backend/runtime issues. 
The soft pairs are only superficially similar and should all be kept separate."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44485|issue:44509", "issue:44060|issue:44368", "issue:34634|issue:36010", "issue:43582|issue:44857", "issue:43749|issue:44291", "issue:43526|issue:43697", "issue:43653|issue:44671", "issue:43526|issue:44554", "issue:43262|issue:43819"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6892, "estimated_input_tokens": 3318, "item_count": 18, "node_count": 18, "serialized_chars": 13271, "soft_pair_count": 9}, "cached_at": "2026-04-14T12:43:12Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "810bbacb863337a5200db4b06fba24553306513c6642cda8a67557ef6522aaf8", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:34634", "issue:36331", "issue:43257", "issue:43329", "issue:43452", "issue:43454", "issue:43577", "issue:43638", "issue:43653", "issue:43746", "issue:43854", "issue:44589", "issue:44811", "issue:44912", "issue:44936", "issue:44977", "issue:44991", "issue:45042"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.93, "soft_edge_verdicts": [{"accept": false, "left": "issue:43452 \u2013 gguf_file breaks for AutoTokenizer.from_pretrained and AutoModelForCausalLM.from_pretrained", "reason": "Both are loading-related, but one is about gguf_file handling in tokenizer/model loading and the other is a specific tokenizer compatibility regression for a particular model. 
Different failure modes and code paths.", "right": "issue:44991 \u2013 transformers >= 5.0.0 fails loading tokenizer for EMBEDDIA/est-roberta"}, {"accept": false, "left": "issue:43638 \u2013 IndexError: index 0 is out of bounds for dimension 0 with size 0 with deepspeed zero3 traininig and a non pretrained Bert model", "reason": "The first is a DeepSpeed ZeRO-3 initialization/indexing failure with a non-pretrained BERT; the second is an evaluation-after-training lifecycle bug. Too different to be the same defect.", "right": "issue:44936 \u2013 trainer.evaluate() fails after trainer.train()"}, {"accept": false, "left": "issue:43454 \u2013 [BUG] AyaVisionConfig fails to tie lm_head weights causing garbage text generation", "reason": "Both concern model loading, but one is a weight-tying/text-generation correctness bug and the other is dtype propagation for BLIP-2/QFormer. Not the same code-path problem.", "right": "issue:43577 \u2013 model.dtype and model.qformer.dtype remain float32 when loading Blip2 model with dtype=torch.float16 or torch.bfloat16"}, {"accept": false, "left": "issue:43746 \u2013 [GraniteSpeechForConditionalGeneration] Models with PEFT adapters won't load from local checkpoints (from_pretrained)", "reason": "Local checkpoint loading with PEFT adapters is unrelated to Whisper processor decoding behavior. Different components and different symptom classes.", "right": "issue:44811 \u2013 Whisper processor.batch_decode() function ignoring skip_special_tokens params"}, {"accept": false, "left": "issue:34634 \u2013 BarkProcessor voice_preset doesn't work", "reason": "One is a BarkProcessor configuration issue; the other is a Trainer API signature mismatch. 
No shared code path or underlying bug.", "right": "issue:36331 \u2013 TypeError: CustomTrainer.compute_loss() got an unexpected keyword argument 'num_items_in_batch'"}, {"accept": false, "left": "issue:43653 \u2013 [BUG][CI] BigBirdTokenizer mask token not registered as special token, gives empty decode output", "reason": "Tokenizer special-token registration/decoding is unrelated to quantization fallback during model loading. Different subsystems and fixes.", "right": "issue:44912 \u2013 git-oss-20b will not properly load with MXFP4 quantization and falls back to bf16"}, {"accept": false, "left": "issue:44589 \u2013 TypeError: couldn't find storage object Float8_e4m3fnStorage", "reason": "Float8 storage lookup is a tensor/storage backend issue, while the PIL backend problem is an image-processor dependency check. Not the same bug.", "right": "issue:45042 \u2013 PIL backend image processors incorrectly require torchvision in v5.4.0"}, {"accept": false, "left": "issue:43854 \u2013 Unable to load `zai-org/GLM-4.7-Flash` model correctly in the unit tests", "reason": "Both mention model loading/generation, but one is a specific unit-test loading failure for GLM-4.7-Flash and the other is a flash-attention generation issue for Qwen3.5. Different models and failure mechanisms.", "right": "issue:44977 \u2013 Qwen3.5 cannot generate normally with flash-attention"}, {"accept": false, "left": "issue:43257 \u2013 Qwen3 MOE weights not converted when loading with accelerate + deepspeed", "reason": "One is MoE weight conversion during Accelerate/DeepSpeed loading; the other is an undefined-variable bug in multimodal token counting for video inputs. No plausible shared patch.", "right": "issue:43329 \u2013 [BUG] _get_num_multimodal_tokens: video branch uses undefined a) get_number_of_video_patches, b)merge_size. 
Tests never hit video route (multiple VLM processors)"}], "summary": "These items are mostly unrelated bugs across different subsystems; none of the soft-similarity pairs look like the same underlying issue, so no duplicate consolidation is warranted."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43452|issue:44991", "issue:43638|issue:44936", "issue:43454|issue:43577", "issue:43746|issue:44811", "issue:34634|issue:36331", "issue:43653|issue:44912", "issue:44589|issue:45042", "issue:43854|issue:44977", "issue:43257|issue:43329"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6824, "estimated_input_tokens": 3284, "item_count": 18, "node_count": 18, "serialized_chars": 13134, "soft_pair_count": 9}, "cached_at": "2026-04-14T12:43:32Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "cbec150ee9310b08782d4d229e72c980cb5957225ef889afc7ca4eda0cbe4252", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:36010", "issue:38175", "issue:39692", "issue:42175", "issue:43257", "issue:43262", "issue:43531", "issue:43645", "issue:43696", "issue:43749", "issue:44568", "issue:44610", "issue:44756", "issue:44811", "issue:44912", "issue:44938", "issue:45042", "issue:45356"], "result": {"analyst_result": {"best_issue_reason": "Issue 39692 is the strongest standalone report in the list because it is specific, well-scoped, and has the clearest end-user impact; however, it should be treated as an independent issue, not a canonical duplicate target.", "best_pr_reason": null, "canonical_issue_reason": "No single canonical issue is justified here because the set does not form one duplicate cluster; if 
a placeholder is needed, issue 39692 is the most detailed and concrete report, but it is not a duplicate of the others.", "canonical_pr_reason": null, "confidence": 0.95, "soft_edge_verdicts": [{"accept": false, "left": "issue:44568", "reason": "Both mention tokenizer regressions, but they affect different models and different failure modes: missing BOS/EOS insertion vs codec handling / warning behavior. Not the same underlying bug.", "right": "issue:45356"}, {"accept": false, "left": "issue:38175", "reason": "One is about zero probabilities in SigLIP2 output, the other about a processor/model input-size mismatch in OmDet-Turbo. Different models and different code paths.", "right": "issue:44610"}, {"accept": false, "left": "issue:36010", "reason": "Importing GenerationMixin from a moved module is an import-path issue; the other is a Jupyter/custom-model initialization regression. Related to Transformers internals, but not the same defect.", "right": "issue:43645"}, {"accept": false, "left": "issue:43257", "reason": "Qwen3 MoE weight conversion under accelerate/deepspeed and GPT-oss OOM are different problems with different symptoms and fixes.", "right": "issue:43696"}, {"accept": false, "left": "issue:39692", "reason": "Both involve model loading/quantization, but they concern different models and distinct failures: a SigLIP2 doc example issue versus MXFP4 loading falling back to bf16.", "right": "issue:44912"}, {"accept": false, "left": "issue:42175", "reason": "One is a backend dependency/install issue for torch extras; the other is a Python 3.14 import/load failure. Different root causes.", "right": "issue:44938"}, {"accept": false, "left": "issue:43749", "reason": "Both are memory/loading-related, but one is a broken FSDP CPU RAM efficient loading path and the other is a Strix Halo mmap-related OOM workaround. 
Not the same bug.", "right": "issue:44756"}, {"accept": false, "left": "issue:43262", "reason": "Different audio-processor APIs and different incorrect defaults/ignored parameters: sampling-rate defaulting in apply_chat_template vs skip_special_tokens being ignored in batch_decode.", "right": "issue:44811"}, {"accept": false, "left": "issue:43531", "reason": "Qwen3-MoE sliding_window behavior and a PIL backend torchvision dependency regression are unrelated subsystem issues.", "right": "issue:45042"}], "summary": "These are mostly unrelated issue reports spanning different models, tokenizers, loading paths, and processor bugs. None of the soft pairs look like the same underlying defect, so I would not merge any of them as duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44568|issue:45356", "issue:38175|issue:44610", "issue:36010|issue:43645", "issue:43257|issue:43696", "issue:39692|issue:44912", "issue:42175|issue:44938", "issue:43749|issue:44756", "issue:43262|issue:44811", "issue:43531|issue:45042"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6586, "estimated_input_tokens": 3165, "item_count": 17, "node_count": 17, "serialized_chars": 12657, "soft_pair_count": 9}, "cached_at": "2026-04-14T12:43:50Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "926cd2f1f2adeb1c34e2513c4dba3450c60eacec59c2aca89130385c6d29d776", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:36010", "issue:36331", "issue:41720", "issue:43257", "issue:43329", "issue:43452", "issue:43540", "issue:43550", "issue:43582", "issue:43697", "issue:43756", "issue:43931", "issue:44361", 
"issue:44442", "issue:44479", "issue:44912", "issue:45357"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "issue:43329", "reason": "Both involve video/VLM processing in Qwen models, but 43329 is an undefined-variable bug in `_get_num_multimodal_tokens`, while 44479 is a broader v5.3.0 video-input regression across multiple model families. Different root causes and likely fixes.", "right": "issue:44479"}, {"accept": false, "left": "issue:43257", "reason": "Different models and failures: 43257 is Qwen3 MoE weight conversion under accelerate+deepspeed, while 43756 is Smollm3 RoPE layer configuration. Same general model-loading area, not the same bug.", "right": "issue:43756"}, {"accept": false, "left": "issue:43550", "reason": "Bamba-9B-v2 torch.compile+SDPA failure and git-oss-20b MXFP4 quantization fallback are unrelated issues in different model paths.", "right": "issue:44912"}, {"accept": false, "left": "issue:36010", "reason": "One is an import error for `GenerationMixin`; the other is a trainer API signature mismatch for `compute_loss`. Completely different problems.", "right": "issue:36331"}, {"accept": false, "left": "issue:43697", "reason": "RTDetrV2 output mismatch in Transformers v5 and Qwen3.5 `save_pretrained` saving wrong visual encoder keys are distinct model-specific regressions.", "right": "issue:45357"}, {"accept": false, "left": "issue:43257", "reason": "Both are Qwen3-related loading problems, but one concerns MoE conversion with accelerate/deepspeed and the other is a Qwen3-VL weight-shape mismatch. 
Not the same concrete defect.", "right": "issue:43931"}, {"accept": false, "left": "issue:41720", "reason": "Qwen3 auto device mapping CUDA assert on A800 and Qwen3OmniMoe video-input ValueError involve different execution stages and failure modes.", "right": "issue:43540"}, {"accept": false, "left": "issue:44361", "reason": "Both are tokenizer-related, but they affect different tokenizer classes and different errors (`MLukeTokenizer` AttributeError vs `AutoTokenizer` failing on `FastSpeech2ConformerTokenizer`).", "right": "issue:44442"}, {"accept": false, "left": "issue:43452", "reason": "`gguf_file` loading breakage and `caching_allocator_warmup` TypeError on Apple Silicon are unrelated subsystems and bugs.", "right": "issue:43582"}], "summary": "This cluster is heterogeneous: the paired items share broad themes like Qwen, tokenizers, loading, or video inputs, but each points to a different concrete bug or code path. I do not see a true duplicate canon here."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43329|issue:44479", "issue:43257|issue:43756", "issue:43550|issue:44912", "issue:36010|issue:36331", "issue:43697|issue:45357", "issue:43257|issue:43931", "issue:41720|issue:43540", "issue:44361|issue:44442", "issue:43452|issue:43582"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7170, "estimated_input_tokens": 3457, "item_count": 18, "node_count": 18, "serialized_chars": 13825, "soft_pair_count": 11}, "cached_at": "2026-04-14T12:44:19Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "750ec774c3c5ace0fcceb08bc1b6097062d54414c5be838c6d9cd4b98e9b0650", 
"prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:42175", "issue:42915", "issue:43329", "issue:43450", "issue:43540", "issue:43577", "issue:43824", "issue:43906", "issue:44062", "issue:44291", "issue:44479", "issue:44488", "issue:44560", "issue:44821", "issue:44991", "issue:45042", "issue:45072", "issue:45381"], "result": {"analyst_result": {"best_issue_reason": "44479 is the most representative issue for the only plausible duplicate subgroup here; it is broader and better scoped than the more specific follow-up report.", "best_pr_reason": null, "canonical_issue_reason": "Issue 44479 is the best umbrella candidate because it is the broadest video-input regression report and overlaps the same Qwen video code path as the closest match in the cluster. That said, the cluster is too mixed to treat it as a true global canonical for every item.", "canonical_pr_reason": null, "confidence": 0.86, "soft_edge_verdicts": [{"accept": false, "left": "issue:44062", "reason": "Both are tokenizer-related, but one is an AddedToken argument TypeError and the other is a model-specific tokenizer load failure; they do not look like the same bug.", "right": "issue:44991"}, {"accept": true, "left": "issue:44479", "reason": "Both describe the same Qwen 2.5/3 video-input regression in v5.3.0, with 45381 sounding like a specific manifestation of the broader video-position-id bug reported in 44479.", "right": "issue:45381"}, {"accept": false, "left": "issue:43329", "reason": "These are both multimodal/video issues, but one is an undefined-variable bug in token counting and the other is a batched-shape problem in video processors; different code paths.", "right": "issue:43450"}, {"accept": false, "left": "issue:42175", "reason": "Unrelated problems: packaging/backend dependency resolution versus an import error for a missing model class.", "right": "issue:43824"}, {"accept": false, "left": "issue:43540", "reason": "Both involve video inputs, but the affected models and failure 
modes differ enough that they do not clearly share the same underlying bug.", "right": "issue:44560"}, {"accept": false, "left": "issue:44291", "reason": "These are unrelated: one is an init_empty_weights/_is_hf_initialized constructor issue, the other is an image-processor torchvision dependency regression.", "right": "issue:45042"}, {"accept": false, "left": "issue:43577", "reason": "Both mention dtype mismatches, but they affect different model families and appear to be separate loading/inference bugs.", "right": "issue:45072"}, {"accept": false, "left": "issue:42175", "reason": "No meaningful overlap: pip backend dependency inclusion versus a tokenizer/model loading issue for a specific checkpoint.", "right": "issue:44488"}, {"accept": false, "left": "issue:43906", "reason": "The first is just an isolated reproduction of another issue, while the second is a Qwen video-position-id bug; there is no clear shared underlying defect.", "right": "issue:45381"}, {"accept": false, "left": "issue:42915", "reason": "Different subsystems and failures: FP8 config handling for Qwen3Moe versus AutoImageProcessor URL loading.", "right": "issue:44821"}, {"accept": false, "left": "issue:43540", "reason": "Both are video-related, but they refer to different model families and different failure signatures; not enough evidence they are the same concrete bug.", "right": "issue:44479"}], "summary": "The cluster is mostly noisy and heterogeneous: tokenizer loading, dtype mismatches, import/install problems, and several distinct video-input bugs. 
Only one soft edge looks like the same underlying regression in the Qwen video path."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44062|issue:44991", "issue:44479|issue:45381", "issue:43329|issue:43450", "issue:42175|issue:43824", "issue:43540|issue:44560", "issue:44291|issue:45042", "issue:43577|issue:45072", "issue:42175|issue:44488", "issue:43906|issue:45381", "issue:42915|issue:44821", "issue:43540|issue:44479"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6824, "estimated_input_tokens": 3284, "item_count": 18, "node_count": 18, "serialized_chars": 13135, "soft_pair_count": 9}, "cached_at": "2026-04-14T12:44:44Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "acdd0b15c430425dfba69af59300d4c085caae8220fea2e8d3ebe5b1bead63d9", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:33357", "issue:34567", "issue:36010", "issue:42175", "issue:42915", "issue:43582", "issue:43643", "issue:43720", "issue:43742", "issue:43756", "issue:43782", "issue:44484", "issue:44704", "issue:44898", "issue:45137", "issue:45325", "issue:45375", "issue:45405"], "result": {"analyst_result": {"best_issue_reason": "Issue 43720 is the most concrete and well-scoped report (BitNet packed weights not being unpacked during accelerate loading), but it still does not subsume the other reports.", "best_pr_reason": null, "canonical_issue_reason": "No single canonical issue emerges here; the cluster is too heterogeneous and the soft links span unrelated bugs. 
If a representative had to be picked, it would only be a weak convention, not a true duplicate hub.", "canonical_pr_reason": null, "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "issue:42915", "reason": "Different models and different failure modes: FineGrainedFP8Config handling vs a key error while loading MobileLLM. Same general area, not the same bug.", "right": "issue:43742"}, {"accept": false, "left": "issue:33357", "reason": "Unrelated problems: a MacOS bus error with a CLIP model versus TrainerState's token counter not updating.", "right": "issue:34567"}, {"accept": false, "left": "issue:43720", "reason": "Both are model loading/inference failures, but they affect different architectures and code paths; no evidence they share one root cause or fix.", "right": "issue:44898"}, {"accept": false, "left": "issue:43756", "reason": "Both mention RoPE/vision position logic, but the bugs are model-specific and semantically different (Smollm3 layer dropping vs Qwen2.5-VL temporal scaling).", "right": "issue:45325"}, {"accept": false, "left": "issue:45137", "reason": "Completely different concerns: DeepSpeed ZeRO3 deque underflow versus a packaging/version bump for MIN_PEFT_VERSION.", "right": "issue:45405"}, {"accept": false, "left": "issue:44484", "reason": "Different APIs and fixes: save_pretrained shard sizing vs AutoProcessor kwarg forwarding to cached_file.", "right": "issue:44704"}, {"accept": false, "left": "issue:36010", "reason": "An import-path regression for GenerationMixin is unrelated to TensorFlow being omitted from the '.[torch]' install extra.", "right": "issue:42175"}, {"accept": false, "left": "issue:43582", "reason": "Different runtime errors on different models/platforms: Apple Silicon allocator TypeError vs Qwen3VL weight_only loading error.", "right": "issue:43782"}, {"accept": false, "left": "issue:43643", "reason": "Both involve config fields being lost, but the code paths differ: remote-code config construction vs a 
strict model config missing a specific field.", "right": "issue:45375"}], "summary": "The soft links look like false positives: the issues mostly share broad keywords such as loading, config, or model-specific failures, but they do not appear to describe the same underlying bug or a mergeable single fix."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:42915|issue:43742", "issue:33357|issue:34567", "issue:43720|issue:44898", "issue:43756|issue:45325", "issue:45137|issue:45405", "issue:44484|issue:44704", "issue:36010|issue:42175", "issue:43582|issue:43782", "issue:43643|issue:45375"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7030, "estimated_input_tokens": 3387, "item_count": 18, "node_count": 18, "serialized_chars": 13547, "soft_pair_count": 10}, "cached_at": "2026-04-14T12:45:03Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "e2a3d86158370de9a60efe70d44f792c2954711f2f37bcfd9b2afb275fb8d701", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:28282", "issue:29127", "issue:34689", "issue:38175", "issue:43452", "issue:43493", "issue:43646", "issue:43653", "issue:43742", "issue:43749", "issue:43994", "issue:44062", "issue:44291", "issue:44361", "issue:44488", "issue:44821", "issue:44898", "issue:45081"], "result": {"analyst_result": {"best_issue_reason": "Issue 43493 is the best representative issue: it names the model family and frames the underlying HF-vs-JAX implementation discrepancy directly.", "best_pr_reason": null, "canonical_issue_reason": "Issue 43493 is the clearest root-cause report for the SigLIP2 problem and best matches the duplicate symptom reported in 
43994.", "canonical_pr_reason": null, "confidence": 0.76, "soft_edge_verdicts": [{"accept": false, "left": "issue:38175", "reason": "Same broad model family, but different symptoms and contexts: zero probabilities vs. interpolation/resolution handling.", "right": "issue:44898"}, {"accept": false, "left": "issue:44062", "reason": "Both are tokenizer-loading errors, but the concrete failures differ: AddedToken argument handling vs. Mistral regex backend access.", "right": "issue:45081"}, {"accept": false, "left": "issue:43646", "reason": "Both mention loading/init regressions, but they affect different mechanisms and code paths (custom init vs. FSDP CPU-efficient loading).", "right": "issue:43749"}, {"accept": false, "left": "issue:43742", "reason": "Different root causes and stack paths: a key error while loading MobileLLM versus an unexpected _is_hf_initialized argument with init_empty_weights.", "right": "issue:44291"}, {"accept": true, "left": "issue:43493", "reason": "Both report the same SigLIP2 model producing wrong outputs; 43493 describes the underlying implementation mismatch and 43994 shows the user-facing symptom.", "right": "issue:43994"}, {"accept": false, "left": "issue:28282", "reason": "Completely different issues: missing PyTorch dependency versus a model-loading regression for Llama 3.2 Vision.", "right": "issue:34689"}, {"accept": false, "left": "issue:43452", "reason": "Both involve loading models/tokenizers, but there is no clear shared bug or same failing code path from the titles alone.", "right": "issue:44488"}, {"accept": false, "left": "issue:44291", "reason": "Different artifact types and failure modes: empty-weights initialization type error versus loading an AutoImageProcessor from URL.", "right": "issue:44821"}, {"accept": false, "left": "issue:43653", "reason": "Tokenizer-related, but these are distinct tokenizers and distinct bugs; not enough evidence of the same underlying defect.", "right": "issue:44361"}, {"accept": false, "left": 
"issue:28282", "reason": "These are unrelated: missing PyTorch import dependency versus a LayoutLMv3 error-message/box-content issue.", "right": "issue:29127"}], "summary": "The set is mostly a mix of unrelated bug reports across different models and code paths. The only likely duplicate pair is the two SigLIP2 issues; the rest should stay separate."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:38175|issue:44898", "issue:44062|issue:45081", "issue:43646|issue:43749", "issue:43742|issue:44291", "issue:43493|issue:43994", "issue:28282|issue:34689", "issue:43452|issue:44488", "issue:44291|issue:44821", "issue:43653|issue:44361", "issue:28282|issue:29127"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6548, "estimated_input_tokens": 3146, "item_count": 17, "node_count": 17, "serialized_chars": 12584, "soft_pair_count": 9}, "cached_at": "2026-04-14T12:45:25Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "92179bb6acec27eed7fc35c61ac144f51681de1b4455f06d0cf28bf7240f5c28", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:34567", "issue:36331", "issue:43452", "issue:43531", "issue:43643", "issue:43653", "issue:43688", "issue:43749", "issue:43927", "issue:44514", "issue:44561", "issue:44792", "issue:44821", "issue:44843", "issue:44964", "issue:45290", "issue:45381"], "result": {"analyst_result": {"best_issue_reason": "`issue:44514` is the best overall issue candidate for this cluster: it is specific, actionable, and matches the only accepted soft edge; the other issues appear unrelated and should not be merged into the same canonical bug.", "best_pr_reason": null, 
"canonical_issue_reason": "`issue:44514` is the best anchor because it is the clearest representative of the only true duplicate-like pair in the set (`issue:45290`), and it describes the concrete `apply_chat_template` crash path more generally.", "canonical_pr_reason": null, "confidence": 0.63, "soft_edge_verdicts": [{"accept": false, "left": "issue:34567", "reason": "Different Trainer problems: one is missing `num_input_tokens_seen` updates, the other is a `compute_loss()` signature incompatibility.", "right": "issue:36331"}, {"accept": false, "left": "issue:43643", "reason": "Both mention `trust_remote_code`, but the failures are different: missing config fields vs removal of `is_torch_fx_available`.", "right": "issue:44561"}, {"accept": false, "left": "issue:43688", "reason": "Unrelated bugs: auxiliary loss normalization in MoE models vs broken FSDP CPU RAM-efficient loading.", "right": "issue:43749"}, {"accept": false, "left": "issue:44792", "reason": "Both are multimodal/model-load related, but one is a Janus test failure and the other is a Phi-4 multimodal loading regression.", "right": "issue:44964"}, {"accept": true, "left": "issue:44514", "reason": "These describe the same crash in `apply_chat_template` with assistant tool-call messages and missing content; the titles differ only by reproduction context.", "right": "issue:45290"}, {"accept": false, "left": "issue:43531", "reason": "Completely different areas: Qwen3-MoE sliding-window behavior vs AutoImageProcessor loading from URL.", "right": "issue:44821"}, {"accept": false, "left": "issue:43653", "reason": "Both involve token handling, but one is a BigBird tokenizer special-token registration bug and the other is DiaConfig losing custom token IDs on save/load.", "right": "issue:43927"}, {"accept": false, "left": "issue:44514", "reason": "Both are Qwen2.5-VL-related, but one is a chat-template/tool-call crash and the other is a video `vision_position_ids` issue.", "right": "issue:45381"}, {"accept": 
false, "left": "issue:43452", "reason": "Both concern loading behavior, but one is a `gguf_file` path issue and the other is an offline-mode regression in `_patch_mistral_regex`.", "right": "issue:44843"}], "summary": "This cluster is mostly heterogeneous: trainer metrics, config/load regressions, multimodal loading, tokenizer quirks, and chat-template failures. Only the two `apply_chat_template` reports look like the same underlying bug; the rest are false-positive similarities across different subsystems."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:34567|issue:36331", "issue:43643|issue:44561", "issue:43688|issue:43749", "issue:44792|issue:44964", "issue:44514|issue:45290", "issue:43531|issue:44821", "issue:43653|issue:43927", "issue:44514|issue:45381", "issue:43452|issue:44843"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6700, "estimated_input_tokens": 3222, "item_count": 17, "node_count": 17, "serialized_chars": 12888, "soft_pair_count": 9}, "cached_at": "2026-04-14T12:45:39Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "d68e9384738b74bed38824ea47f384365f969c2f77a7048176e5f2e9960535a7", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:41762", "issue:43295", "issue:43452", "issue:43504", "issue:43550", "issue:43577", "issue:43653", "issue:43697", "issue:43720", "issue:43742", "issue:44568", "issue:44589", "issue:44936", "issue:45042", "issue:45081", "issue:45216", "issue:45356"], "result": {"analyst_result": {"best_issue_reason": "No single issue is a good global representative because the reports are not duplicates of one another; they only share broad 
themes like loading or tokenization.", "best_pr_reason": null, "canonical_issue_reason": "No clear canonical issue: the items span distinct subsystems and model-specific regressions, so choosing one would misrepresent the cluster.", "canonical_pr_reason": null, "confidence": 0.98, "soft_edge_verdicts": [{"accept": false, "left": "issue:43742", "reason": "Both are load-time failures, but they concern different models and different failure modes (key error vs missing Float8 storage object).", "right": "issue:44589"}, {"accept": false, "left": "issue:43697", "reason": "Different regressions in different models: RTDetrV2 output drift vs Qwen3.5 save_pretrained checkpoint corruption.", "right": "issue:45216"}, {"accept": false, "left": "issue:43504", "reason": "Unrelated loading bugs: BEiT legacy field handling vs BitNet packed-weight unpacking during accelerate loading.", "right": "issue:43720"}, {"accept": false, "left": "issue:45042", "reason": "Different code paths and symptoms: PIL image processors wrongly requiring torchvision vs Mistral regex patch crashing on tokenizer internals.", "right": "issue:45081"}, {"accept": false, "left": "issue:43452", "reason": "Both involve model loading, but one is gguf_file incompatibility for AutoTokenizer/AutoModel, the other is dtype propagation for Blip2/Qformer.", "right": "issue:43577"}, {"accept": false, "left": "issue:43504", "reason": "Different bugs entirely: BEiT pretrained preset loading vs Bamba torch.compile/SDPA behavior.", "right": "issue:43550"}, {"accept": false, "left": "issue:41762", "reason": "Gemma3 ZeRO-3 load-time IndexError and trainer.evaluate() after train() are different execution failures with no clear shared root cause.", "right": "issue:44936"}, {"accept": false, "left": "issue:43295", "reason": "Both mention tokenizer-related regressions, but they affect different models and different behaviors (processor.tokenizer/images passthrough vs Kimi-K2.5 codec and warning handling).", "right": 
"issue:45356"}, {"accept": false, "left": "issue:43653", "reason": "Both are special-token/tokenizer issues, but they target different tokenizers and different missing-token behaviors (mask token registration vs BOS/EOS insertion).", "right": "issue:44568"}], "summary": "This cluster is an over-grouping of unrelated bug reports across different models, tokenizers, loading paths, and training/runtime behaviors. None of the soft-edge pairs look like the same underlying defect."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43742|issue:44589", "issue:43697|issue:45216", "issue:43504|issue:43720", "issue:45042|issue:45081", "issue:43452|issue:43577", "issue:43504|issue:43550", "issue:41762|issue:44936", "issue:43295|issue:45356", "issue:43653|issue:44568"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7052, "estimated_input_tokens": 3398, "item_count": 18, "node_count": 18, "serialized_chars": 13591, "soft_pair_count": 10}, "cached_at": "2026-04-14T12:45:58Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "1c64270038d059633ae08e3ec3ccd4c1667527f343390d75117f7ebfa9d75703", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:41720", "issue:43504", "issue:43525", "issue:43606", "issue:43643", "issue:43720", "issue:43906", "issue:43927", "issue:44464", "issue:44466", "issue:44610", "issue:44625", "issue:44743", "issue:44843", "issue:44877", "issue:44898", "issue:44991", "issue:45005"], "result": {"analyst_result": {"best_issue_reason": "Issue 44625 is the most suitable representative issue because it is active and well-discussed, while the rest are mostly isolated bugs affecting 
different models or subsystems.", "best_pr_reason": null, "canonical_issue_reason": "Issue 44625 is the best anchor among this set: it is open, has the highest inbound reference count, and describes a concrete, current model-config propagation bug. That said, the cluster is not a true duplicate set.", "canonical_pr_reason": null, "confidence": 0.84, "soft_edge_verdicts": [{"accept": false, "left": "issue:43606", "reason": "Both are inference-time failures, but they involve different models and different causes: CPU offload device mismatch vs image-size/interpolation behavior.", "right": "issue:44898"}, {"accept": false, "left": "issue:43504", "reason": "Different model families and different bugs: legacy config field loading for BEiT vs processor/model input size mismatch for OmDet-Turbo.", "right": "issue:44610"}, {"accept": false, "left": "issue:44625", "reason": "Both are config-loading issues, but the concrete failures differ: Qwen3.5 num_labels propagation vs granite_speech strict-config loading.", "right": "issue:44877"}, {"accept": false, "left": "issue:41720", "reason": "No clear same bug from the titles alone; one is a Qwen3 A800 cudaErrorAssert with auto device mapping, the other is just an isolated reproduction of a different upstream issue.", "right": "issue:43906"}, {"accept": false, "left": "issue:44464", "reason": "Both touch generation/state handling, but one is about compiled forward chunked generation and the other about recurrent state reset with cache; they do not look like the same code-path failure.", "right": "issue:44743"}, {"accept": false, "left": "issue:44843", "reason": "Both are tokenizer/loading related, but they target different failures and likely different code paths: offline model_info patching vs a specific tokenizer load regression.", "right": "issue:44991"}, {"accept": false, "left": "issue:43720", "reason": "Distinct problems: packed weights not being unpacked during accelerate loading vs config token IDs being lost on 
save/load.", "right": "issue:43927"}, {"accept": false, "left": "issue:43606", "reason": "Different subsystems and failure modes: device mismatch during CPU offload vs missing custom token IDs causing generation IndexError.", "right": "issue:43927"}, {"accept": false, "left": "issue:43525", "reason": "Related only at a high level (missing config fields), but one is a specific Llama4Config pad_token_id AttributeError and the other is a broader trust_remote_code AutoConfig missing-fields issue.", "right": "issue:43643"}, {"accept": false, "left": "issue:44466", "reason": "Both mention tied-weights/serialization, but one is a v5 serialization inconsistency while the other is about translation-model tied weights more generally; not the same concrete bug.", "right": "issue:45005"}], "summary": "These issues are mostly unrelated model/config/loading bugs across different models and code paths. None of the soft pairs look like true duplicates, so I would not merge any of them. If an anchor is needed, issue 44625 is the strongest representative because it is open, active, and has the most inbound discussion."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43606|issue:44898", "issue:43504|issue:44610", "issue:44625|issue:44877", "issue:41720|issue:43906", "issue:44464|issue:44743", "issue:44843|issue:44991", "issue:43720|issue:43927", "issue:43606|issue:43927", "issue:43525|issue:43643", "issue:44466|issue:45005"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6594, "estimated_input_tokens": 3169, "item_count": 17, "node_count": 17, "serialized_chars": 12674, "soft_pair_count": 9}, "cached_at": "2026-04-14T12:46:15Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", 
"model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "388fac348dc40e137492f1e408b4a5d08843666f7ea00d9d6c3934476fddb030", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:24643", "issue:29127", "issue:38175", "issue:40444", "issue:43122", "issue:43295", "issue:43577", "issue:43697", "issue:43720", "issue:43819", "issue:43906", "issue:44361", "issue:44479", "issue:44811", "issue:44936", "issue:44977", "issue:45356"], "result": {"analyst_result": {"best_issue_reason": "issue:43295 has the strongest combination of clarity, reproducibility, and generality for acting as the representative issue in this cluster.", "best_pr_reason": null, "canonical_issue_reason": "issue:43295 is the clearest, most broadly representative bug report in the set: a concrete regression in a core multimodal API with a well-scoped failure mode and version boundary.", "canonical_pr_reason": null, "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "issue:43819", "reason": "Different subsystems and failures: DAC latent reconstruction mismatch vs Whisper batch_decode ignoring a parameter.", "right": "issue:44811"}, {"accept": false, "left": "issue:43577", "reason": "Both are inference/loading regressions, but they concern different models and different code paths (BLIP2 dtype casting vs RTDetrV2 output changes).", "right": "issue:43697"}, {"accept": false, "left": "issue:40444", "reason": "Qwen2.5-VL multi-image IterableDataset finetuning failure is unrelated to Whisper decode behavior.", "right": "issue:44811"}, {"accept": false, "left": "issue:43720", "reason": "BitNet packed-weight loading and MLukeTokenizer task AttributeError are distinct bugs with no shared underlying fix.", "right": "issue:44361"}, {"accept": false, "left": "issue:24643", "reason": "A DeepSpeed training weight-shape runtime error is not the same issue as improving a LayoutLMv3 error message.", "right": "issue:29127"}, {"accept": false, "left": "issue:38175", "reason": 
"Different symptoms and code paths: unexpected zero probabilities in SigLIP2 vs processor.tokenizer/image-handling regression.", "right": "issue:43295"}, {"accept": false, "left": "issue:44977", "reason": "Separate tokenizer/model regressions for different models: Qwen3.5 flash-attention generation vs Kimi-K2.5 codec handling.", "right": "issue:45356"}, {"accept": false, "left": "issue:43122", "reason": "One is a tokenizer behavior change across versions; the other is an isolated reproduction of a different issue, not the same concrete bug.", "right": "issue:43906"}, {"accept": false, "left": "issue:44479", "reason": "Video-input regression in Qwen VL models is unrelated to trainer.evaluate() failing after trainer.train().", "right": "issue:44936"}], "summary": "The items are mostly unrelated Hugging Face issues that only share broad subsystem similarity (tokenizers, multimodal models, regressions). None of the soft pairs look like the same underlying bug, so all soft edges should be rejected."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43819|issue:44811", "issue:43577|issue:43697", "issue:40444|issue:44811", "issue:43720|issue:44361", "issue:24643|issue:29127", "issue:38175|issue:43295", "issue:44977|issue:45356", "issue:43122|issue:43906", "issue:44479|issue:44936"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7028, "estimated_input_tokens": 3386, "item_count": 18, "node_count": 18, "serialized_chars": 13542, "soft_pair_count": 10}, "cached_at": "2026-04-14T12:46:32Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": 
"705f83a4f305ad97222f26e2e09349098105808f945bb7efb3e55324b8728068", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:28282", "issue:30064", "issue:32090", "issue:33357", "issue:33666", "issue:35141", "issue:43329", "issue:43452", "issue:43504", "issue:43531", "issue:43577", "issue:43606", "issue:43742", "issue:44451", "issue:44464", "issue:44554", "issue:44977", "issue:45325"], "result": {"analyst_result": {"best_issue_reason": "issue:43452 is the cleanest standalone regression report with a concrete, user-facing loading failure, but it does not subsume the other reports.", "best_pr_reason": null, "canonical_issue_reason": "No clear canonical issue: the items span unrelated failures in loading, training, image processing, multimodal indexing, and generation, so any single anchor would be arbitrary.", "canonical_pr_reason": null, "confidence": 0.93, "soft_edge_verdicts": [{"accept": false, "left": "issue:30064", "reason": "Different bugs: image processor segmentation-map handling vs Qwen2-VL multi-GPU training behavior.", "right": "issue:33666"}, {"accept": false, "left": "issue:28282", "reason": "Different failure modes: missing PyTorch import error vs MacOS bus error with a community CLIP model.", "right": "issue:33357"}, {"accept": false, "left": "issue:28282", "reason": "Missing backend dependency is unrelated to the Trainer NoneType broadcast error.", "right": "issue:32090"}, {"accept": false, "left": "issue:43504", "reason": "Legacy model-field loading issue vs CPU offload device-mismatch issue; not the same code-path problem.", "right": "issue:43606"}, {"accept": false, "left": "issue:28282", "reason": "Import-time PyTorch availability error is unrelated to post-init embedding reinitialization after resizing.", "right": "issue:35141"}, {"accept": false, "left": "issue:43577", "reason": "Blip2 dtype propagation bug and MPS attention correctness issue are different model/runtime defects.", "right": "issue:44554"}, {"accept": false, "left": 
"issue:44464", "reason": "Both involve generation, but one is compiled-forward chunked generation inconsistency and the other is flash-attention behavior for Qwen3.5.", "right": "issue:44977"}, {"accept": false, "left": "issue:43329", "reason": "Different multimodal bugs: undefined video-branch helpers vs rope-index temporal scaling for Qwen2.5-VL images.", "right": "issue:45325"}, {"accept": false, "left": "issue:43452", "reason": "gguf_file loader regression is not the same as a general failure to load ScandiBERT.", "right": "issue:44451"}, {"accept": false, "left": "issue:43531", "reason": "Qwen3-MoE sliding-window issue and MobileLLM-125M key error are separate model-loading/configuration problems.", "right": "issue:43742"}], "summary": "These issues are only loosely similar at the framework/subsystem level; they do not describe the same underlying bug or change, so none of the soft pairs should be merged."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:30064|issue:33666", "issue:28282|issue:33357", "issue:28282|issue:32090", "issue:43504|issue:43606", "issue:28282|issue:35141", "issue:43577|issue:44554", "issue:44464|issue:44977", "issue:43329|issue:45325", "issue:43452|issue:44451", "issue:43531|issue:43742"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6836, "estimated_input_tokens": 3290, "item_count": 18, "node_count": 18, "serialized_chars": 13159, "soft_pair_count": 9}, "cached_at": "2026-04-14T12:46:51Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "b7b913733e6a3614b158a45de1cefcf230d2823a5cf6d2e307d17c4342cd7992", "prepared_review_unit_schema_version": "1.0"}, "nodes": 
["issue:41720", "issue:43012", "issue:43334", "issue:43504", "issue:43540", "issue:43572", "issue:43577", "issue:43653", "issue:43716", "issue:43927", "issue:44361", "issue:44464", "issue:44479", "issue:44625", "issue:44849", "issue:45200", "issue:45229", "issue:45357"], "result": {"analyst_result": {"best_issue_reason": "If one issue must represent the group, issue 44849 is the least-bad anchor because it is a focused, active Qwen3.5 runtime bug with a clear symptom and discussion; however, it should not be used to subsume the other reports.", "best_pr_reason": null, "canonical_issue_reason": "No single canonical issue fits the set well: the reports span distinct code paths (tokenizer/config loading, vision/video preprocessing, dtype handling, generation, OOM, and compilation/device mapping) with different failure modes.", "canonical_pr_reason": null, "confidence": 0.93, "soft_edge_verdicts": [{"accept": false, "left": "issue:43504", "reason": "Both involve model/token handling, but one is BEiT preset loading with a legacy field and the other is BigBirdTokenizer special-token registration/decoding; different components and defects.", "right": "issue:43653"}, {"accept": false, "left": "issue:44849", "reason": "Same model family, but different bugs: output_hidden_states behavior versus incorrect save_pretrained visual-encoder keys. 
Not the same fixable code path.", "right": "issue:45357"}, {"accept": false, "left": "issue:45200", "reason": "Gemma4 token-type defaults for text-only fine-tuning and a multi-GPU CUDA OOM are unrelated symptoms and likely unrelated causes.", "right": "issue:45229"}, {"accept": false, "left": "issue:43927", "reason": "DiaConfig losing custom token IDs after save/load and MLukeTokenizer raising AttributeError on tasks are different config/tokenizer failures.", "right": "issue:44361"}, {"accept": false, "left": "issue:43012", "reason": "Both mention dtype, but one is a PyTorch warning during bfloat16 compilation and the other is BLIP-2 remaining float32 after load; not the same bug.", "right": "issue:43577"}, {"accept": false, "left": "issue:44479", "reason": "A Qwen video-input regression across several models and Qwen3.5 num_labels propagation are separate config/regression issues.", "right": "issue:44625"}, {"accept": false, "left": "issue:43334", "reason": "Both are missing-token-field regressions, but they affect different models and different fields (pad_token_id vs pad_token_idx) with different load-time failures.", "right": "issue:43572"}, {"accept": false, "left": "issue:43540", "reason": "Qwen3OmniMoe video processing ValueError and Mistral-3 image-preprocessor/model dtype mismatch are unrelated code paths.", "right": "issue:43716"}, {"accept": false, "left": "issue:41720", "reason": "Qwen3 auto device mapping cudaErrorAssert and chunked generation inconsistency under compiled forward are different execution-mode failures, not one underlying bug.", "right": "issue:44464"}], "summary": "These items are mostly unrelated model/config regressions that only share broad themes like token IDs, dtype, or video inputs. 
None of the soft pairs looks like the same underlying bug, so no mergeable duplicate cluster emerges here."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43504|issue:43653", "issue:44849|issue:45357", "issue:45200|issue:45229", "issue:43927|issue:44361", "issue:43012|issue:43577", "issue:44479|issue:44625", "issue:43334|issue:43572", "issue:43540|issue:43716", "issue:41720|issue:44464"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6914, "estimated_input_tokens": 3329, "item_count": 18, "node_count": 18, "serialized_chars": 13316, "soft_pair_count": 10}, "cached_at": "2026-04-14T12:47:10Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "14e7e67c062ae572d2c548ce8a22a3a292e79160eaaf1f7900c484e46c564416", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:32090", "issue:33666", "issue:34567", "issue:43262", "issue:43278", "issue:43493", "issue:43525", "issue:43572", "issue:43688", "issue:43742", "issue:43756", "issue:43825", "issue:43873", "issue:44488", "issue:44821", "issue:44843", "issue:44913", "issue:45084"], "result": {"analyst_result": {"best_issue_reason": "Issue 43873 is the broadest/most active open issue in the set, but it is still not a good canonical representative because it concerns a different problem than the others.", "best_pr_reason": "No pull requests are present in this cluster.", "canonical_issue_reason": "No single canonical issue stands out; the issues are distinct bugs in different subsystems/models, and the apparent matches are only shallowly similar.", "canonical_pr_reason": "No pull requests are present in this cluster.", "confidence": 0.97, 
"soft_edge_verdicts": [{"accept": false, "left": "issue:43525", "reason": "Both are missing-pad-token config regressions, but they affect different models and different fields (`pad_token_id` vs `pad_token_idx`), so they are not the same bug.", "right": "issue:43572"}, {"accept": false, "left": "issue:43742", "reason": "These are different loading failures affecting different components (`facebook/MobileLLM-125M` key error vs `AutoImageProcessor` URL loading).", "right": "issue:44821"}, {"accept": false, "left": "issue:32090", "reason": "`Trainer` GPU broadcast TypeError is a separate training error from Qwen2-VL multi-GPU training; same area, different failure mode and cause.", "right": "issue:33666"}, {"accept": false, "left": "issue:32090", "reason": "One is a broadcast/type error during training; the other is `num_input_tokens_seen` not updating in `TrainerState`.", "right": "issue:34567"}, {"accept": false, "left": "issue:43756", "reason": "Both touch RoPE/config behavior, but one is about dropped layers in Smollm3 while the other is about `rotary_pct` not persisting on reload; not the same defect.", "right": "issue:44913"}, {"accept": false, "left": "issue:43278", "reason": "Different model/training issues: dtype mismatch in evaluation vs auxiliary-loss normalization.", "right": "issue:43688"}, {"accept": false, "left": "issue:43493", "reason": "One reports a SigLIP2 implementation discrepancy; the other is a pipeline error message incorrectly claiming translation support in v5.", "right": "issue:43825"}, {"accept": false, "left": "issue:43262", "reason": "Audio chat-template sampling-rate defaulting and a template compilation TypeError are unrelated bugs.", "right": "issue:45084"}, {"accept": false, "left": "issue:44488", "reason": "Both are model/tokenizer loading complaints, but one is about a specific model failing to load and the other is an offline-mode regression in `_patch_mistral_regex`.", "right": "issue:44843"}, {"accept": false, "left": 
"issue:43688", "reason": "Aux-loss normalization and offloading-with-quantization are different technical problems with no clear shared code-path.", "right": "issue:43873"}], "summary": "This cluster is heterogeneous: the items cover unrelated bugs across training, loading, config serialization, audio/text processing, and model-specific behavior. None of the soft-similarity pairs look like the same underlying issue, so no duplicates should be merged here."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43525|issue:43572", "issue:43742|issue:44821", "issue:32090|issue:33666", "issue:32090|issue:34567", "issue:43756|issue:44913", "issue:43278|issue:43688", "issue:43493|issue:43825", "issue:43262|issue:45084", "issue:44488|issue:44843", "issue:43688|issue:43873"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7024, "estimated_input_tokens": 3384, "item_count": 18, "node_count": 18, "serialized_chars": 13534, "soft_pair_count": 10}, "cached_at": "2026-04-14T12:47:24Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "25728efb887f0881026350e49923c710e7e530165c755b503633b49c7b1efaec", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:33666", "issue:36010", "issue:39692", "issue:43257", "issue:43278", "issue:43299", "issue:43493", "issue:43540", "issue:43756", "issue:43844", "issue:44060", "issue:44423", "issue:44734", "issue:44964", "issue:44977", "issue:45072", "issue:45198", "issue:45405"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.97, "soft_edge_verdicts": 
[{"accept": false, "left": "issue:43278", "reason": "Different failure modes: BF16-to-FP32 dtype drift in evaluation vs gradient explosion with ZeRO-3 during random-init training.", "right": "issue:43844"}, {"accept": false, "left": "issue:33666", "reason": "Unrelated topics: Qwen2-VL multi-GPU training vs a GenerationMixin import error in transformers.generation.", "right": "issue:36010"}, {"accept": false, "left": "issue:43756", "reason": "Different Qwen3-related bugs: RoPE layer drop in Smollm3 vs an incorrect tied-weights warning for Qwen3-Next.", "right": "issue:44060"}, {"accept": false, "left": "issue:43257", "reason": "Both involve Qwen3/MoE loading, but one is an accelerate+deepspeed weight-conversion issue and the other is a transformers 5.0.0.dev0 regression for Qwen3VL MoE models.", "right": "issue:43299"}, {"accept": false, "left": "issue:45072", "reason": "Different models and symptoms: dtype mismatches in SwitchTransformers/TimmWrapperModel vs Wav2Vec2 save_pretrained/tokenization failures.", "right": "issue:45198"}, {"accept": false, "left": "issue:43540", "reason": "Same broad Qwen family, but distinct bugs: video-input processing ValueError in Qwen3OmniMoe vs flash-attention generation issues in Qwen3.5.", "right": "issue:44977"}, {"accept": false, "left": "issue:44423", "reason": "Both are serve crashes, but one is continuous-batching multimodal AttributeError and the other is KV-cache continuation tensor-indexing failure.", "right": "issue:44734"}, {"accept": false, "left": "issue:43493", "reason": "Different subsystems entirely: SigLIP2 implementation mismatch vs DeepSpeed ZeRO-3 gradient growth during training.", "right": "issue:43844"}, {"accept": false, "left": "issue:44964", "reason": "Model load failure for Phi-4-multimodal-instruct is unrelated to a PEFT minimum-version bump.", "right": "issue:45405"}, {"accept": false, "left": "issue:39692", "reason": "Same SigLIP2 area, but the first is a broken documentation example while the second is 
an HF-vs-JAX implementation discrepancy; not the same underlying bug.", "right": "issue:43493"}], "summary": "This cluster is a noisy mix of unrelated model-specific bugs, docs issues, and infrastructure regressions across Qwen, SigLIP, Wav2Vec2, DeepSpeed, serve, and PEFT. I do not see a single underlying duplicate bug/change tying them together, and none of the soft pairs look mergeable as the same fix."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43278|issue:43844", "issue:33666|issue:36010", "issue:43756|issue:44060", "issue:43257|issue:43299", "issue:45072|issue:45198", "issue:43540|issue:44977", "issue:44423|issue:44734", "issue:43493|issue:43844", "issue:44964|issue:45405", "issue:39692|issue:43493"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6940, "estimated_input_tokens": 3342, "item_count": 18, "node_count": 18, "serialized_chars": 13368, "soft_pair_count": 9}, "cached_at": "2026-04-14T12:47:42Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "d624ee165e1041eb1bc887d7a00fda4825f933dac8a71d1a6957d75da29842c7", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:33666", "issue:36331", "issue:39692", "issue:43295", "issue:43329", "issue:43572", "issue:43606", "issue:43646", "issue:43688", "issue:43746", "issue:43824", "issue:43994", "issue:44291", "issue:44466", "issue:44493", "issue:44521", "issue:44589", "issue:45290"], "result": {"analyst_result": {"best_issue_reason": "If a representative issue is needed, #44521 is the broadest open bug in the set and sits in the most reusable user-facing area (multimodal chat templating), but it is not a duplicate anchor 
for the rest of the cluster.", "best_pr_reason": null, "canonical_issue_reason": "No single canonical issue fits the whole set; the issues are too heterogeneous to consolidate into one duplicate target.", "canonical_pr_reason": null, "confidence": 0.92, "soft_edge_verdicts": [{"accept": false, "left": "issue:43824", "reason": "Different failure modes and different subsystems: Qwen2.5-VL import/export naming vs Float8 storage lookup during loading.", "right": "issue:44589"}, {"accept": false, "left": "issue:33666", "reason": "Multi-GPU training for Qwen2-VL is unrelated to a CustomTrainer compute_loss signature regression.", "right": "issue:36331"}, {"accept": false, "left": "issue:44521", "reason": "Both involve apply_chat_template, but one is all-zero assistant masks for multimodal inputs while the other is a tokenize=True crash with tool-call messages; different bugs.", "right": "issue:45290"}, {"accept": false, "left": "issue:43688", "reason": "Auxiliary-loss normalization in MoE models is unrelated to GraniteSpeech PEFT adapter loading from local checkpoints.", "right": "issue:43746"}, {"accept": false, "left": "issue:43572", "reason": "Both are 5.0-era regressions, but one is a missing config field and the other is an init_empty_weights constructor-argument TypeError.", "right": "issue:44291"}, {"accept": false, "left": "issue:43994", "reason": "SigLIP2 wrong outputs and unexpected position-id keys are distinct model-behavior issues with no clear shared code path.", "right": "issue:44493"}, {"accept": false, "left": "issue:43646", "reason": "Custom model initialization breakage is not the same as device-dependent lm_head.weight serialization inconsistency.", "right": "issue:44466"}, {"accept": false, "left": "issue:43329", "reason": "Undefined variables in multimodal token counting are unrelated to a CPU offload device-mismatch failure in bark-small.", "right": "issue:43606"}, {"accept": false, "left": "issue:39692", "reason": "Both touch multimodal 
processor/tokenizer usage, but one is a doc-example/model mismatch and quantization failure while the other is a regression around processor.tokenizer and image tokenization.", "right": "issue:43295"}], "summary": "This cluster is mostly a false-positive similarity group: the items cover unrelated issues across training, model loading, config regressions, and multimodal chat templating. None of the soft pairs look like the same underlying bug."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43824|issue:44589", "issue:33666|issue:36331", "issue:44521|issue:45290", "issue:43688|issue:43746", "issue:43572|issue:44291", "issue:43994|issue:44493", "issue:43646|issue:44466", "issue:43329|issue:43606", "issue:39692|issue:43295"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6618, "estimated_input_tokens": 3181, "item_count": 17, "node_count": 17, "serialized_chars": 12724, "soft_pair_count": 10}, "cached_at": "2026-04-14T12:48:03Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "f9f645fd11c6d6587283b65d11673e7d639f13353e6944a4c824dc9985e7f2f5", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:15354", "issue:33357", "issue:33666", "issue:38175", "issue:39401", "issue:43232", "issue:43257", "issue:43278", "issue:43540", "issue:43575", "issue:43701", "issue:44315", "issue:44568", "issue:44792", "issue:44849", "issue:45200", "issue:45405"], "result": {"analyst_result": {"best_issue_reason": "No issue is a strong global representative for deduping, because the items do not describe the same underlying defect.", "best_pr_reason": null, "canonical_issue_reason": "No single issue is a 
clear canonical; the cluster contains unrelated bugs across different models, tokenizers, training, and versioning.", "canonical_pr_reason": null, "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "issue:33357", "reason": "Different problems: a MacOS bus error in a pretrained CLIP model vs multi-GPU training for Qwen2-VL.", "right": "issue:33666"}, {"accept": false, "left": "issue:39401", "reason": "Both are tokenizer-related, but one is a wrong offset_mapping bug for Qwen3 while the other is missing BOS/EOS insertion for mdeberta-v3-base.", "right": "issue:44568"}, {"accept": false, "left": "issue:15354", "reason": "Unrelated model issues: torch.jit.script export failure for ViT vs zero probabilities from siglip2-base-patch16-224.", "right": "issue:38175"}, {"accept": false, "left": "issue:43575", "reason": "A tensor-parallel OOM on Qwen2-57B is unrelated to a PEFT version bump/release problem.", "right": "issue:45405"}, {"accept": false, "left": "issue:43232", "reason": "Both involve generation-adjacent behavior, but one is a sync_gpus model-kwarg update issue and the other is output_hidden_states handling in Qwen3.5.", "right": "issue:44849"}, {"accept": false, "left": "issue:44792", "reason": "A Janus image-generation test failure is unrelated to the MIN_PEFT_VERSION release bump.", "right": "issue:45405"}, {"accept": false, "left": "issue:43257", "reason": "Qwen3 MoE weight conversion under accelerate+deepspeed is a model-loading bug, not a dependency-version release issue.", "right": "issue:45405"}, {"accept": false, "left": "issue:43540", "reason": "Different areas: Qwen3OmniMoe video-processing ValueError vs Gemma 4 mm_token_type_ids defaults for text-only fine-tuning.", "right": "issue:45200"}, {"accept": false, "left": "issue:43278", "reason": "Embedding dtype drift in eval is unrelated to a resume_from_checkpoint key mismatch.", "right": "issue:43701"}, {"accept": false, "left": "issue:44315", "reason": "Liger Kernel application with 
model_init is unrelated to Qwen3.5 output_hidden_states behavior.", "right": "issue:44849"}], "summary": "The cluster is heterogeneous: the issues span unrelated models, tokenizer behavior, training/loading, generation, and release/version problems. No pair looks like the same underlying bug/change, so none should be merged as duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:33357|issue:33666", "issue:39401|issue:44568", "issue:15354|issue:38175", "issue:43575|issue:45405", "issue:43232|issue:44849", "issue:44792|issue:45405", "issue:43257|issue:45405", "issue:43540|issue:45200", "issue:43278|issue:43701", "issue:44315|issue:44849"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6904, "estimated_input_tokens": 3324, "item_count": 18, "node_count": 18, "serialized_chars": 13296, "soft_pair_count": 9}, "cached_at": "2026-04-14T12:48:37Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "22233a797cd206fd54896003450e3bb10f8ec3b016bc0e1629759144d3a716e8", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:37428", "issue:43334", "issue:43525", "issue:43582", "issue:43606", "issue:43688", "issue:43844", "issue:43957", "issue:44112", "issue:44442", "issue:44534", "issue:44554", "issue:44964", "issue:44977", "issue:45198", "issue:45200", "issue:45325", "issue:45356"], "result": {"analyst_result": {"best_issue_reason": "issue:43525 is the strongest representative issue overall: it captures the shared config-attribute regression more generically than the Qwen3-VL-specific report.", "best_pr_reason": null, "canonical_issue_reason": "issue:43525 is the best canonical issue 
because it is the broader, more generic pad_token_id AttributeError report; it best represents the only near-duplicate bug class in the set.", "canonical_pr_reason": null, "confidence": 0.84, "soft_edge_verdicts": [{"accept": false, "left": "issue:43844", "reason": "Different bugs: DeepSpeed/ZeRO-3 gradient growth vs model loading failure for Phi-4 multimodal.", "right": "issue:44964"}, {"accept": false, "left": "issue:43582", "reason": "Different subsystems: Apple Silicon caching_allocator_warmup TypeError vs Qwen flash-attention generation issue.", "right": "issue:44977"}, {"accept": false, "left": "issue:37428", "reason": "Both are loading/import errors, but the concrete failures differ: flash-attention import symbol vs meta-device model loading regression.", "right": "issue:43957"}, {"accept": false, "left": "issue:43606", "reason": "Both involve device handling, but one is CPU offload device mismatch and the other is a CI test about stale device overrides; not the same bug.", "right": "issue:44112"}, {"accept": false, "left": "issue:43688", "reason": "Different model math problems: auxiliary loss normalization vs non-persistent buffer initialization junk.", "right": "issue:44534"}, {"accept": false, "left": "issue:45200", "reason": "Different multimodal/tokenization behaviors: Gemma 4 mm_token_type_ids defaulting vs Qwen2.5-VL rope index scaling.", "right": "issue:45325"}, {"accept": false, "left": "issue:44554", "reason": "Unrelated problems: MPS attention correctness vs tokenizer codec handling/regression.", "right": "issue:45356"}, {"accept": true, "left": "issue:43334", "reason": "Same underlying symptom class: both report AttributeError for missing pad_token_id on config objects, suggesting the same config-API regression across models.", "right": "issue:43525"}, {"accept": false, "left": "issue:44442", "reason": "Both concern tokenization, but they affect different speech models and different failure modes (AutoTokenizer load vs 
save_pretrained/tokenization failure).", "right": "issue:45198"}], "summary": "This cluster is mostly heterogeneous issue reports across unrelated regressions. The only plausible duplicate pair is the two pad_token_id AttributeErrors in model configs; everything else targets different code paths or subsystems and should stay separate."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43844|issue:44964", "issue:43582|issue:44977", "issue:37428|issue:43957", "issue:43606|issue:44112", "issue:43688|issue:44534", "issue:45200|issue:45325", "issue:44554|issue:45356", "issue:43334|issue:43525", "issue:44442|issue:45198"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6912, "estimated_input_tokens": 3328, "item_count": 18, "node_count": 18, "serialized_chars": 13312, "soft_pair_count": 9}, "cached_at": "2026-04-14T12:48:55Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "3f0f00d56144b07876c68ac0b00b7b4dc44e3b0d5b465bf7e688565eb682aef5", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:37428", "issue:40990", "issue:43295", "issue:43388", "issue:43493", "issue:43526", "issue:43572", "issue:43701", "issue:43824", "issue:44360", "issue:44466", "issue:44561", "issue:44589", "issue:44610", "issue:44625", "issue:44898", "issue:45127", "issue:45412"], "result": {"analyst_result": {"best_issue_reason": "#44625 is the best overall issue to anchor discussion: it is current, specific, and has the strongest evidence of active user impact.", "best_pr_reason": null, "canonical_issue_reason": "No true duplicate cluster stands out here; if one issue must serve as the representative tracker, #44625 
is the clearest because it is open, concrete, and heavily referenced.", "canonical_pr_reason": null, "confidence": 0.94, "soft_edge_verdicts": [{"accept": false, "left": "issue:37428", "reason": "Both are import errors, but they concern different symbols and different model areas; no shared code path is evident.", "right": "issue:43824"}, {"accept": false, "left": "issue:44898", "reason": "One is an image-size/processor mismatch for Perceiver, the other is RT-DETR memory cleanup; unrelated bugs.", "right": "issue:45412"}, {"accept": false, "left": "issue:43388", "reason": "Both involve label handling, but one is in evaluation batch gathering and the other is in BeitImageProcessorFast label reduction; different failure modes and fixes.", "right": "issue:43526"}, {"accept": false, "left": "issue:44466", "reason": "Serialization of tied weights on device placement is not the same problem as LoRA merge collapse after vocabulary extension.", "right": "issue:45127"}, {"accept": false, "left": "issue:43295", "reason": "These affect different pathways: processor/tokenizer API regression versus OmDet-Turbo image preprocessing size mismatch.", "right": "issue:44610"}, {"accept": false, "left": "issue:43493", "reason": "SigLIP2 implementation discrepancy and resume-from-checkpoint key mismatch are unrelated subsystems with different root causes.", "right": "issue:43701"}, {"accept": false, "left": "issue:40990", "reason": "A perplexity regression on gpt-oss-20b is unrelated to Qwen3.5 num_labels propagation into text config.", "right": "issue:44625"}, {"accept": false, "left": "issue:44360", "reason": "A missing ReLU in the DSA indexer and removal of is_torch_fx_available causing trust_remote_code breakage are distinct issues.", "right": "issue:44561"}, {"accept": false, "left": "issue:43572", "reason": "Missing pad_token_idx in StableLmConfig is unrelated to the Float8 storage deserialization TypeError.", "right": "issue:44589"}], "summary": "These items are mostly unrelated 
bug reports across different components; none of the soft pairs look like the same underlying defect or change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:37428|issue:43824", "issue:44898|issue:45412", "issue:43388|issue:43526", "issue:44466|issue:45127", "issue:43295|issue:44610", "issue:43493|issue:43701", "issue:40990|issue:44625", "issue:44360|issue:44561", "issue:43572|issue:44589"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6792, "estimated_input_tokens": 3268, "item_count": 18, "node_count": 18, "serialized_chars": 13070, "soft_pair_count": 9}, "cached_at": "2026-04-14T12:49:14Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "07599ad40ab752173b5e431d6dacbbf78412e8402c4e9b47526aa6e0311451fb", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:38175", "issue:43572", "issue:43819", "issue:43824", "issue:43844", "issue:43873", "issue:43975", "issue:44464", "issue:44514", "issue:44521", "issue:44625", "issue:44849", "issue:44871", "issue:44936", "issue:44987", "issue:45003", "issue:45083", "issue:45412"], "result": {"analyst_result": {"best_issue_reason": "Issue 44514 is the best overall issue candidate for triage because it is specific, well-scoped, and appears to have the richest community signal; however, it is not a duplicate of the other items.", "best_pr_reason": null, "canonical_issue_reason": "Issue 44514 is the strongest anchor if one must be chosen: it is concrete, has the highest discussion/inbound activity among the set, and describes a clearly reproducible bug with a specific API and failure mode.", "canonical_pr_reason": null, "confidence": 0.87, 
"soft_edge_verdicts": [{"accept": false, "left": "issue:43572", "reason": "Both involve loading/config regressions, but one is a StableLmConfig pad_token_idx omission and the other is a transformers>=5.1.0 loading failure for a specific model; different model families and likely different code paths.", "right": "issue:44987"}, {"accept": false, "left": "issue:43844", "reason": "ZeRO-3 gradient inflation during training and trainer.evaluate() failing after trainer.train() are different lifecycle bugs with different symptoms and fixes.", "right": "issue:44936"}, {"accept": false, "left": "issue:43873", "reason": "Quantization/offloading behavior is unrelated to chunked generation inconsistencies under compiled forward; same broad generation/runtime area, but not the same bug.", "right": "issue:44464"}, {"accept": false, "left": "issue:44514", "reason": "Both touch apply_chat_template, but one is a batched-input crash with padding=False while the other is all-zero assistant masks for multimodal inputs; distinct failures.", "right": "issue:44521"}, {"accept": false, "left": "issue:44625", "reason": "Both are config propagation mismatches, but they concern different models and different fields (num_labels vs eos_token_id), so they are not the same defect.", "right": "issue:44871"}, {"accept": false, "left": "issue:43824", "reason": "ImportError for Qwen2_5_VLForConditionalGeneration is a class export/load issue, while DeepSeek detokenization in v5 is a tokenizer/output bug; no shared underlying change.", "right": "issue:43975"}, {"accept": false, "left": "issue:38175", "reason": "Unexpected zero probabilities in SigLIP2 inference and RT-DETR memory not being released are unrelated model-behavior problems.", "right": "issue:45412"}, {"accept": false, "left": "issue:44849", "reason": "A Qwen3.5 output_hidden_states bug and unsafe sys.modules access in modeling_utils are different code locations and failure modes.", "right": "issue:45003"}, {"accept": false, "left": 
"issue:43819", "reason": "DAC.from_latents mismatching the forward pass and qwen3_omni_moe helper length behavior are separate issues in different models/functions.", "right": "issue:45083"}], "summary": "These issues are mostly thematically related within Transformers, but they describe different failures, models, and code paths. None of the soft pairs look like true duplicates or mergeable as the same underlying bug/change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43572|issue:44987", "issue:43844|issue:44936", "issue:43873|issue:44464", "issue:44514|issue:44521", "issue:44625|issue:44871", "issue:43824|issue:43975", "issue:38175|issue:45412", "issue:44849|issue:45003", "issue:43819|issue:45083"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6960, "estimated_input_tokens": 3352, "item_count": 18, "node_count": 18, "serialized_chars": 13406, "soft_pair_count": 10}, "cached_at": "2026-04-14T12:49:34Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "57cb187bd77a1e183f659ad4949c952ac4e3ec739987912c4934c50a5de497ab", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:42915", "issue:43278", "issue:43295", "issue:43450", "issue:43575", "issue:43606", "issue:43824", "issue:43827", "issue:43856", "issue:43873", "issue:43901", "issue:43994", "issue:44462", "issue:44464", "issue:44561", "issue:44568", "issue:44991", "issue:45083"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "issue:42915", "reason": "Different 
models and failure modes: Qwen3MoE FP8 config vs Qwen2.5-VL import error. No shared code-path or fix.", "right": "issue:43824"}, {"accept": false, "left": "issue:43575", "reason": "OOM during tensor-parallel load is unrelated to chunked generation inconsistency under compiled forward.", "right": "issue:44464"}, {"accept": false, "left": "issue:43295", "reason": "Custom processor/tokenizer regression is not the same as quantization offloading not working; different subsystems and symptoms.", "right": "issue:43873"}, {"accept": false, "left": "issue:43278", "reason": "One is a dtype shift between train/eval, the other is MoE training memory usage. Related area, but not the same bug.", "right": "issue:43856"}, {"accept": false, "left": "issue:44462", "reason": "AutoTokenizer ignoring tokenizer.json is a tokenizer-loading issue, not the v5 removal of is_torch_fx_available.", "right": "issue:44561"}, {"accept": false, "left": "issue:43295", "reason": "Both involve model/processor behavior, but they affect different models and different regressions; not a single fixable bug.", "right": "issue:43994"}, {"accept": false, "left": "issue:43827", "reason": "Both are documentation-related, but they cover different pipeline APIs and different stale references.", "right": "issue:43901"}, {"accept": false, "left": "issue:44568", "reason": "Different tokenizer failures on different models; one is special-token handling, the other is loading failure.", "right": "issue:44991"}, {"accept": false, "left": "issue:43295", "reason": "Processor/tokenizer regression is unrelated to batched video processor shape handling.", "right": "issue:43450"}, {"accept": false, "left": "issue:43606", "reason": "CPU offload device mismatch and Qwen3 Omni feature-length helper behavior are separate issues with no common concrete code path.", "right": "issue:45083"}], "summary": "This cluster is heterogeneous: it mixes model-specific regressions, tokenizer/import issues, docs bugs, and training/offload 
behavior. None of the soft pairs look like the same underlying bug or change, so there is no credible duplicate canonical here."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:42915|issue:43824", "issue:43575|issue:44464", "issue:43295|issue:43873", "issue:43278|issue:43856", "issue:44462|issue:44561", "issue:43295|issue:43994", "issue:43827|issue:43901", "issue:44568|issue:44991", "issue:43295|issue:43450", "issue:43606|issue:45083"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6966, "estimated_input_tokens": 3355, "item_count": 18, "node_count": 18, "serialized_chars": 13419, "soft_pair_count": 10}, "cached_at": "2026-04-14T12:49:56Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "9b3f1b30a4cf7ce2cfa30728f15b865aab263cf97cecf2b901d21e0f4a952785", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:37428", "issue:38175", "issue:43425", "issue:43450", "issue:43493", "issue:43653", "issue:44112", "issue:44351", "issue:44488", "issue:44561", "issue:44568", "issue:44610", "issue:44779", "issue:44857", "issue:45127", "issue:45245", "issue:45290", "issue:45381"], "result": {"analyst_result": {"best_issue_reason": "issue:44561 is the most actionable and broadly applicable issue here; it describes a concrete compatibility break rather than a narrow model-specific symptom.", "best_pr_reason": null, "canonical_issue_reason": "issue:44561 is the broadest and most central regression report in the set: it identifies a v5 removal breaking trust_remote_code models, which makes it the best representative of the cluster\u2019s general regression theme.", "canonical_pr_reason": null, 
"confidence": 0.73, "soft_edge_verdicts": [{"accept": false, "left": "issue:44561", "reason": "Both mention v5 regressions, but they affect different components and symptoms: trust_remote_code compatibility vs DeepSeek tokenizer output.", "right": "issue:44779"}, {"accept": false, "left": "issue:44610", "reason": "Different model families and different failures: processor input size mismatch vs AMP/CUDA loss crash.", "right": "issue:44857"}, {"accept": false, "left": "issue:38175", "reason": "Both involve SigLIP2, but one is a zero-probability symptom and the other is a broader HF-vs-JAX discrepancy; the underlying bug is not explicitly the same.", "right": "issue:43493"}, {"accept": false, "left": "issue:45127", "reason": "Unrelated bugs: LoRA merge with tied embeddings vs a categorical cardinality runtime limit.", "right": "issue:45245"}, {"accept": false, "left": "issue:44488", "reason": "Both are tokenizer regressions, but they concern different models and different failure modes.", "right": "issue:44568"}, {"accept": false, "left": "issue:45290", "reason": "Different subsystems entirely: chat template/tool-call handling vs Qwen2.5-VL video position ids.", "right": "issue:45381"}, {"accept": false, "left": "issue:43450", "reason": "Video processor shape bug vs SigLIP2 implementation discrepancy; no shared code-path is evident.", "right": "issue:43493"}, {"accept": false, "left": "issue:37428", "reason": "An import error for a flash-attention helper is not the same bug as generic Torch 2.10 incompatibility.", "right": "issue:43425"}, {"accept": false, "left": "issue:43653", "reason": "Different areas and symptoms: BigBird tokenizer special-token registration vs a GraniteSpeech CI device-override test failure.", "right": "issue:44112"}, {"accept": false, "left": "issue:37428", "reason": "Both are import-related, but they reference different missing symbols and likely different breakages.", "right": "issue:44351"}], "summary": "These issues are mostly distinct bug 
reports with only superficial topic overlap (especially v5 regressions and model/tokenizer complaints). I did not find any soft pair that clearly describes the same underlying bug, so all soft edges are rejected."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44561|issue:44779", "issue:44610|issue:44857", "issue:38175|issue:43493", "issue:45127|issue:45245", "issue:44488|issue:44568", "issue:45290|issue:45381", "issue:43450|issue:43493", "issue:37428|issue:43425", "issue:43653|issue:44112", "issue:37428|issue:44351"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7060, "estimated_input_tokens": 3402, "item_count": 18, "node_count": 18, "serialized_chars": 13606, "soft_pair_count": 10}, "cached_at": "2026-04-14T12:50:13Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "2149cf30c533e6c51758fe8d5416d5e71c99edcd094450258a371c6b606a15cc", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:29942", "issue:32090", "issue:39692", "issue:43262", "issue:43278", "issue:43450", "issue:43526", "issue:43575", "issue:43819", "issue:43825", "issue:44112", "issue:44186", "issue:44265", "issue:44448", "issue:44743", "issue:44855", "issue:45083", "issue:45198"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "issue:44186", "reason": "Different models and failure modes: LayoutLMv2 tokenizer crashes on NER/batching, while Wav2Vec2 has save_pretrained/tokenization issues.", "right": "issue:45198"}, {"accept": false, "left": 
"issue:44448", "reason": "Unrelated behaviors: Pegasus v4/v5 output drift versus Qwen3 recurrent state reset with cache.", "right": "issue:44743"}, {"accept": false, "left": "issue:39692", "reason": "One is a SigLIP2 documentation/example mismatch and quantization failure; the other is a BeitImageProcessorFast label-reduction bug.", "right": "issue:43526"}, {"accept": false, "left": "issue:29942", "reason": "Flash Attention 2 test failures and Trainer _gpu_broadcast_one NoneType errors are separate subsystems and symptoms.", "right": "issue:32090"}, {"accept": false, "left": "issue:43278", "reason": "BF16-to-FP32 dtype change in evaluate is unrelated to the pipeline translation error-message bug.", "right": "issue:43825"}, {"accept": false, "left": "issue:44265", "reason": "torch.export with torch_compilable_check and Wav2Vec2 save/tokenization failures are different code paths and concrete bugs.", "right": "issue:45198"}, {"accept": false, "left": "issue:43450", "reason": "Batched video-shape handling is unrelated to Qwen2-57B-A14B-Instruct tensor-parallel OOM.", "right": "issue:43575"}, {"accept": false, "left": "issue:43819", "reason": "DAC.from_latents mismatch due to missing STE is not the same as a stale GraniteSpeech device-override CI test.", "right": "issue:44112"}, {"accept": false, "left": "issue:44855", "reason": "Python 3.13 import/IndentationError in DebertaV2Model is unrelated to the qwen3_omni_moe helper-length behavior.", "right": "issue:45083"}, {"accept": false, "left": "issue:43262", "reason": "Audio chat-template sampling-rate default bug is unrelated to video processor batching shape.", "right": "issue:43450"}], "summary": "The cluster is heterogeneous: the soft pairs mention similar high-level areas, but each pair points to a different concrete bug or change, so none should be merged as duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": 
["issue:44186|issue:45198", "issue:44448|issue:44743", "issue:39692|issue:43526", "issue:29942|issue:32090", "issue:43278|issue:43825", "issue:44265|issue:45198", "issue:43450|issue:43575", "issue:43819|issue:44112", "issue:44855|issue:45083", "issue:43262|issue:43450"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6638, "estimated_input_tokens": 3191, "item_count": 17, "node_count": 17, "serialized_chars": 12762, "soft_pair_count": 10}, "cached_at": "2026-04-14T12:50:29Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "24594671a6e67c23074c8deaf7d3cf6698c51e0303e822f100dc15593c20a8b3", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:38175", "issue:39692", "issue:43066", "issue:43404", "issue:43550", "issue:43611", "issue:43819", "issue:43931", "issue:44448", "issue:44462", "issue:44493", "issue:44779", "issue:44977", "issue:45083", "issue:45200", "issue:45229", "issue:45381"], "result": {"analyst_result": {"best_issue_reason": "issue:44493 is the best representative issue for the cluster, but only as a loose thematic anchor; the set is not a single underlying bug.", "best_pr_reason": null, "canonical_issue_reason": "issue:44493 is the closest umbrella candidate because it is the broadest, most user-facing regression report in the set and relates to the shared position-id/theme that appears in a few other issues.", "canonical_pr_reason": null, "confidence": 0.92, "soft_edge_verdicts": [{"accept": false, "left": "issue:45200", "reason": "Both mention multimodal/token-position inputs, but they affect different models and different fields (`mm_token_type_ids` vs `vision_position_ids`) with different failure modes.", "right": "issue:45381"}, {"accept": false, 
"left": "issue:43404", "reason": "Unrelated bugs: tied `lm_head` weights in Mistral3 vs missing/default `mm_token_type_ids` in Gemma 4. Different code paths and symptoms.", "right": "issue:45200"}, {"accept": false, "left": "issue:43611", "reason": "Both are model-loading problems, but one is about `base_model_prefix` handling while the other is a Qwen3-VL shape mismatch. Not the same underlying defect.", "right": "issue:43931"}, {"accept": false, "left": "issue:39692", "reason": "SigLIP2 docs/example errors are about model/processor mismatch and quantization, not the broader position-id warning regression in 44493.", "right": "issue:44493"}, {"accept": false, "left": "issue:43819", "reason": "DAC `from_latents` vs forward mismatch is an audio-model implementation bug, not related to unexpected keys in position IDs.", "right": "issue:44493"}, {"accept": false, "left": "issue:38175", "reason": "SigLIP2 zero probabilities and unexpected position-id keys are different failures in different models; only a very broad transformer-v5 regression theme is shared.", "right": "issue:44493"}, {"accept": false, "left": "issue:44448", "reason": "Both are v4/v5 output regressions, but they concern different models and likely different tokenizer/decoding behavior. Too broad to merge as one bug.", "right": "issue:44779"}, {"accept": false, "left": "issue:44977", "reason": "Flash-attention generation failure and multi-GPU CUDA OOM are unrelated runtime issues in different parts of the stack.", "right": "issue:45229"}, {"accept": false, "left": "issue:43066", "reason": "Both involve tokenizer behavior in v5, but one is wrong decoder type selection while the other ignores `tokenizer.json`. 
Distinct bugs.", "right": "issue:44462"}, {"accept": false, "left": "issue:43550", "reason": "`torch.compile` + SDPA failure in Bamba is unrelated to `_get_feat_extract_output_lengths` behavior in qwen3_omni_moe.", "right": "issue:45083"}], "summary": "This cluster is a loose collection of Transformers regression reports with some superficial similarity around tokenizers/position ids, but the soft pairs do not look like true duplicates or mergeable PR-equivalents. Most are model-specific bugs in different subsystems."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:45200|issue:45381", "issue:43404|issue:45200", "issue:43611|issue:43931", "issue:39692|issue:44493", "issue:43819|issue:44493", "issue:38175|issue:44493", "issue:44448|issue:44779", "issue:44977|issue:45229", "issue:43066|issue:44462", "issue:43550|issue:45083"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7218, "estimated_input_tokens": 3481, "item_count": 18, "node_count": 18, "serialized_chars": 13922, "soft_pair_count": 11}, "cached_at": "2026-04-14T12:50:47Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "6bdaa4a692d1674390a4dec8443fb91344edd7f209a398030c29227dbb4ad784", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:29127", "issue:32090", "issue:40444", "issue:43335", "issue:43450", "issue:43526", "issue:43550", "issue:43606", "issue:43653", "issue:43723", "issue:44448", "issue:44554", "issue:44857", "issue:44977", "issue:45072", "issue:45083", "issue:45372", "issue:45406"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": null, "canonical_issue_reason": null, 
"canonical_pr_reason": null, "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "issue:43450", "reason": "Both are shape/label-related processor bugs, but one is about batched video input shapes and the other is BeitImageProcessorFast dropping labels; different code paths and fixes.", "right": "issue:43526"}, {"accept": false, "left": "issue:43335", "reason": "These affect different models and logic: SwitchTransformers sparse-layer construction vs qwen3_omni_moe feature-length helper behavior.", "right": "issue:45083"}, {"accept": false, "left": "issue:43335", "reason": "One is a configuration bug in SwitchTransformers; the other is a torch.compile/SDPA failure in Bamba-9B-v2. Not the same underlying defect.", "right": "issue:43550"}, {"accept": false, "left": "issue:29127", "reason": "LayoutLMv3 error messaging and Trainer _gpu_broadcast_one NoneType handling are unrelated issues.", "right": "issue:32090"}, {"accept": false, "left": "issue:40444", "reason": "Different subsystems and symptoms: Qwen2.5-VL iterable dataset with multiple images vs BeitImageProcessorFast label reduction.", "right": "issue:43526"}, {"accept": false, "left": "issue:45372", "reason": "Both involve Gemma 4, but one is an import-time dependency breakage and the other is a serve-time missing _tokenizer attribute; not the same concrete bug.", "right": "issue:45406"}, {"accept": false, "left": "issue:43723", "reason": "AutoTokenizer loading in v5 and an MPS attention correctness bug are unrelated.", "right": "issue:44554"}, {"accept": false, "left": "issue:44857", "reason": "Different failure modes in different models: LwDetrImageLoss AMP/CUDA crash vs dtype mismatch in SwitchTransformers/TimmWrapperModel bfloat16 inference.", "right": "issue:45072"}, {"accept": false, "left": "issue:44448", "reason": "Pegasus v4/v5 output differences and Qwen3.5 flash-attention generation problems are separate model-specific bugs.", "right": "issue:44977"}, {"accept": false, "left": 
"issue:43335", "reason": "SwitchTransformers sparse-layer initialization and BigBirdTokenizer mask-token registration are unrelated.", "right": "issue:43653"}, {"accept": false, "left": "issue:43606", "reason": "CPU offload device mismatch in bark-small and an AMP CUDA loss crash in LwDetrImageLoss are different code paths and fixes.", "right": "issue:44857"}], "summary": "The soft-similarity links are mostly superficial: the issues cover unrelated models, components, and failure modes, so this cluster should not be merged into a single duplicate set."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43450|issue:43526", "issue:43335|issue:45083", "issue:43335|issue:43550", "issue:29127|issue:32090", "issue:40444|issue:43526", "issue:45372|issue:45406", "issue:43723|issue:44554", "issue:44857|issue:45072", "issue:44448|issue:44977", "issue:43335|issue:43653", "issue:43606|issue:44857"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7318, "estimated_input_tokens": 3531, "item_count": 18, "node_count": 18, "serialized_chars": 14124, "soft_pair_count": 12}, "cached_at": "2026-04-14T12:51:03Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "d6800b3907560a267060d2315ac43ff8ea4f326a33e2cc83c28655b114c52047", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:34689", "issue:35141", "issue:37428", "issue:41720", "issue:43335", "issue:43504", "issue:43550", "issue:43653", "issue:43720", "issue:43873", "issue:44112", "issue:44493", "issue:44898", "issue:44987", "issue:45072", "issue:45083", "issue:45198", "issue:45229"], "result": {"analyst_result": {"best_issue_reason": "No issue is a 
strong global representative because the soft matches are false positives and the topics are too diverse to collapse into one root problem.", "best_pr_reason": null, "canonical_issue_reason": "No single canonical issue fits the cluster: the items span unrelated problems in model loading, tokenization, configuration, inference, CI, and offloading, with no pair showing the same concrete failure mode.", "canonical_pr_reason": null, "confidence": 0.93, "soft_edge_verdicts": [{"accept": false, "left": "issue:44898", "reason": "Different models and different failure modes: Perceiver image interpolation vs. Wav2Vec2 save/tokenization behavior.", "right": "issue:45198"}, {"accept": false, "left": "issue:43335", "reason": "One is a SwitchTransformers config bug; the other is a Beit pretrained loading issue tied to a legacy field.", "right": "issue:43504"}, {"accept": false, "left": "issue:37428", "reason": "Both involve loading errors, but the reported breakages are unrelated: a missing flash-attention import symbol vs. 
a specific model load failure in transformers>=5.1.0.", "right": "issue:44987"}, {"accept": false, "left": "issue:43335", "reason": "SwitchTransformers config creation and GraniteSpeech CI device override failures are different code paths and symptoms.", "right": "issue:44112"}, {"accept": false, "left": "issue:43653", "reason": "BigBirdTokenizer special-token registration and qwen3_omni_moe feature-length helper behavior are unrelated.", "right": "issue:45083"}, {"accept": false, "left": "issue:43335", "reason": "A sparse-layer config bug is not the same underlying issue as bfloat16 dtype mismatches during inference.", "right": "issue:45072"}, {"accept": false, "left": "issue:43873", "reason": "Quantization offloading problems and Gemma4 multi-GPU CUDA OOM are different operational issues.", "right": "issue:45229"}, {"accept": false, "left": "issue:43550", "reason": "Bamba torch.compile/SDPA failure is unrelated to the GraniteSpeech stale device override CI test.", "right": "issue:44112"}, {"accept": false, "left": "issue:43873", "reason": "Quantization/offloading behavior does not match the 'unexpected key with position id' loading problem.", "right": "issue:44493"}, {"accept": false, "left": "issue:34689", "reason": "Llama 3.2 Vision loading breakage and output-embedding reinitialization on resize are different bugs.", "right": "issue:35141"}, {"accept": false, "left": "issue:41720", "reason": "Qwen3 auto device mapping cudaErrorAssert and Gemma4 multi-GPU OOM are both inference failures but not the same root cause.", "right": "issue:45229"}, {"accept": false, "left": "issue:43720", "reason": "BitNet packed-weight unpacking during accelerate loading is unrelated to the GraniteSpeech device override CI failure.", "right": "issue:44112"}], "summary": "This cluster is a set of mostly unrelated issue titles that only look superficially similar. 
None of the soft pairs appears to describe the same underlying bug or change, so I would not merge any of them as duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44898|issue:45198", "issue:43335|issue:43504", "issue:37428|issue:44987", "issue:43335|issue:44112", "issue:43653|issue:45083", "issue:43335|issue:45072", "issue:43873|issue:45229", "issue:43550|issue:44112", "issue:43873|issue:44493", "issue:34689|issue:35141", "issue:41720|issue:45229", "issue:43720|issue:44112"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7176, "estimated_input_tokens": 3460, "item_count": 18, "node_count": 18, "serialized_chars": 13840, "soft_pair_count": 11}, "cached_at": "2026-04-14T12:51:17Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "619483b9ab0956e0617e7cc52c69851cc911db5601176f8bfadee5eed3c80ccd", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:25251", "issue:29942", "issue:32090", "issue:33357", "issue:35141", "issue:42915", "issue:42994", "issue:43065", "issue:43278", "issue:43329", "issue:43994", "issue:44442", "issue:44448", "issue:44466", "issue:44734", "issue:45072", "issue:45406", "issue:45412"], "result": {"analyst_result": {"best_issue_reason": "Issue 44734 is the strongest representative issue here because it is precise, reproducible, and tied to a single code path rather than a broad symptom.", "best_pr_reason": null, "canonical_issue_reason": "Issue 44734 is the most concrete and actionable report in the set: it names a specific endpoint (`/v1/responses`), a specific failure mode (wrong tensor indexing during KV-cache continuation), and a narrowly scoped 
code path.", "canonical_pr_reason": null, "confidence": 0.87, "soft_edge_verdicts": [{"accept": false, "left": "issue:43065", "reason": "Different bugs in different areas: `Sam3PixelDecoder` dummy conv vs SigLIP2 bad outputs with AutoModel/pipeline. Only a loose vision-model similarity.", "right": "issue:43994"}, {"accept": false, "left": "issue:43278", "reason": "One is an embedding dtype mismatch between train/eval; the other is an undefined-variable bug in multimodal token counting for video. Not the same underlying issue.", "right": "issue:43329"}, {"accept": false, "left": "issue:29942", "reason": "Flash Attention 2 test failures and token-embedding resizing reinitialization are unrelated code paths and symptoms.", "right": "issue:35141"}, {"accept": false, "left": "issue:43065", "reason": "Dummy Conv2d in a pixel decoder and RT-DETR memory not being released are different defects with no shared failing mechanism.", "right": "issue:45412"}, {"accept": false, "left": "issue:32090", "reason": "Trainer broadcast TypeError with `NoneType` is unrelated to a MacOS bus error loading a community CLIP model.", "right": "issue:33357"}, {"accept": false, "left": "issue:44442", "reason": "Tokenizer loading failure and dtype mismatch inference failures are different subsystems and failure modes.", "right": "issue:45072"}, {"accept": false, "left": "issue:44448", "reason": "Both are v5 regressions, but one is generation/output divergence and the other is serialization of tied weights; not the same concrete bug.", "right": "issue:44466"}, {"accept": false, "left": "issue:43994", "reason": "SigLIP2 output corruption and RT-DETR memory retention are unrelated model-specific problems.", "right": "issue:45412"}, {"accept": false, "left": "issue:42915", "reason": "Both involve quantization, but one is a Qwen3Moe FineGrainedFP8 failure and the other is quantized model saving; different code paths.", "right": "issue:42994"}, {"accept": false, "left": "issue:44734", "reason": 
"KV-cache continuation crash in serve and missing `_tokenizer` on `Gemma4Processor` are distinct serving bugs.", "right": "issue:45406"}, {"accept": false, "left": "issue:25251", "reason": "Pipeline `top_k` nesting behavior and Trainer GPU broadcast `NoneType` error are completely unrelated.", "right": "issue:32090"}], "summary": "The soft pairs are mostly superficial textual matches across unrelated bugs; none are strong enough to deduplicate. The best standalone issue is the KV-cache continuation crash in `transformers serve` because it has the clearest concrete failure path and likely reproducibility."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43065|issue:43994", "issue:43278|issue:43329", "issue:29942|issue:35141", "issue:43065|issue:45412", "issue:32090|issue:33357", "issue:44442|issue:45072", "issue:44448|issue:44466", "issue:43994|issue:45412", "issue:42915|issue:42994", "issue:44734|issue:45406", "issue:25251|issue:32090"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6988, "estimated_input_tokens": 3366, "item_count": 18, "node_count": 18, "serialized_chars": 13464, "soft_pair_count": 10}, "cached_at": "2026-04-14T12:51:37Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "ae346fe8c050967a230345ec1df5b720f2d71f41fb16b0519feda2cf2311b11f", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:29942", "issue:30064", "issue:43257", "issue:43278", "issue:43381", "issue:43653", "issue:43824", "issue:44206", "issue:44279", "issue:44442", "issue:44479", "issue:44610", "issue:44743", "issue:44871", "issue:44977", "issue:45103", "issue:45372", "issue:45412"], "result": 
{"analyst_result": {"best_issue_reason": "No single issue is a strong global representative; the cluster is too mixed, and the apparent similarities are mostly superficial.", "best_pr_reason": null, "canonical_issue_reason": "No single canonical issue stands out because the items describe different underlying bugs rather than one shared defect.", "canonical_pr_reason": null, "confidence": 0.98, "soft_edge_verdicts": [{"accept": false, "left": "issue:44871", "reason": "Different failure modes and model families: Gemma eos-token config mismatch vs Qwen3.5 flash-attention generation regression.", "right": "issue:44977"}, {"accept": false, "left": "issue:43653", "reason": "Different tokenizer bugs: BigBird special-token registration vs AutoTokenizer failing to load a specific tokenizer class.", "right": "issue:44442"}, {"accept": false, "left": "issue:43257", "reason": "Different problems: missing weight conversion with accelerate/deepspeed vs embedding dtype changing during evaluation.", "right": "issue:43278"}, {"accept": false, "left": "issue:43278", "reason": "Unrelated behavior: dtype drift in eval vs gradient checkpointing being unsupported in eval mode.", "right": "issue:43381"}, {"accept": false, "left": "issue:44206", "reason": "Different code paths and symptoms: feature extractor center-arg regression vs video-input regression in Qwen VL models.", "right": "issue:44479"}, {"accept": false, "left": "issue:45103", "reason": "Unrelated import/parsing issues: auto_docstring annotation handling vs mistral_common import failure affecting Gemma 4 loading.", "right": "issue:45372"}, {"accept": false, "left": "issue:43653", "reason": "Tokenizer special-token registration bug is unrelated to recurrent state reset when caching sequence chunks.", "right": "issue:44743"}, {"accept": false, "left": "issue:43824", "reason": "One is a concrete missing import for a specific class; the other is a vague dependency issue, not the same bug.", "right": "issue:44279"}, {"accept": 
false, "left": "issue:44610", "reason": "Processor output size mismatch vs model memory not being released; different subsystems and defects.", "right": "issue:45412"}, {"accept": false, "left": "issue:29942", "reason": "Failing Flash Attention 2 tests and void segmentation-map processing are unrelated issues.", "right": "issue:30064"}], "summary": "The set is heterogeneous: the soft pairs span unrelated bugs in different model families, tokenizers, processors, import paths, and runtime behavior. None of the proposed soft edges look like true duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44871|issue:44977", "issue:43653|issue:44442", "issue:43257|issue:43278", "issue:43278|issue:43381", "issue:44206|issue:44479", "issue:45103|issue:45372", "issue:43653|issue:44743", "issue:43824|issue:44279", "issue:44610|issue:45412", "issue:29942|issue:30064"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6840, "estimated_input_tokens": 3292, "item_count": 18, "node_count": 18, "serialized_chars": 13165, "soft_pair_count": 9}, "cached_at": "2026-04-14T12:51:54Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "fc9d87035391658bb63b95c85a7820bfedc38064d5246a34c31c9d5cdf12416f", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:29127", "issue:32090", "issue:33290", "issue:33357", "issue:34567", "issue:39692", "issue:43262", "issue:43334", "issue:43504", "issue:43526", "issue:43531", "issue:44186", "issue:44206", "issue:44387", "issue:44857", "issue:44977", "issue:45229", "issue:45245"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": null, 
"canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "issue:33357", "reason": "Both involve vision models, but one is a MacOS bus error in CLIP loading while the other is a SigLIP2 documentation/example mismatch and quantization failure; different bugs and fixes.", "right": "issue:39692"}, {"accept": false, "left": "issue:29127", "reason": "LayoutLMv3 error messaging and Trainer token-count tracking are unrelated code paths with different symptoms and remedies.", "right": "issue:34567"}, {"accept": false, "left": "issue:43334", "reason": "Both mention Qwen models, but one is a missing `pad_token_id` on load and the other is a `sliding_window` issue for MoE models; not the same underlying defect.", "right": "issue:43531"}, {"accept": false, "left": "issue:32090", "reason": "Trainer broadcast `NoneType` crash and DeepSpeed AdaFactor OOM are distinct failures; they do not point to one shared fix.", "right": "issue:33290"}, {"accept": false, "left": "issue:43504", "reason": "BEiT legacy-field loading failure and LwDETR AMP/CUDA crash are unrelated model/feature bugs.", "right": "issue:44857"}, {"accept": false, "left": "issue:44977", "reason": "Flash-attention generation regression and category-limit runtime error are different issues with no common code path.", "right": "issue:45245"}, {"accept": false, "left": "issue:44387", "reason": "Both are OOM reports, but one is about extra reserved memory under int4 quantization and the other is multi-GPU inference on Gemma4; the causes are different.", "right": "issue:45229"}, {"accept": false, "left": "issue:43526", "reason": "BEiT fast image processor label reduction and LayoutLMv2 tokenizer NER/padding crashes are separate preprocessing bugs.", "right": "issue:44186"}, {"accept": false, "left": "issue:43262", "reason": "Audio chat-template sampling-rate defaulting and LasrFeatureExtractor `center` argument handling are different audio-processing 
problems.", "right": "issue:44206"}], "summary": "These issues are heterogeneous and do not form a duplicate cluster. The soft pairs only share broad subsystem or model-family similarity, not the same concrete bug or change, so none should be merged."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:33357|issue:39692", "issue:29127|issue:34567", "issue:43334|issue:43531", "issue:32090|issue:33290", "issue:43504|issue:44857", "issue:44977|issue:45245", "issue:44387|issue:45229", "issue:43526|issue:44186", "issue:43262|issue:44206"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7118, "estimated_input_tokens": 3431, "item_count": 18, "node_count": 18, "serialized_chars": 13724, "soft_pair_count": 11}, "cached_at": "2026-04-14T12:52:09Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "ba809f58e5de5c286decac68ce108343de042d585bb2ad1052b1568bf7b9d8d1", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:29127", "issue:29942", "issue:33357", "issue:35141", "issue:36010", "issue:42915", "issue:43232", "issue:43381", "issue:43388", "issue:43526", "issue:43550", "issue:43606", "issue:43701", "issue:43931", "issue:43994", "issue:44442", "issue:44857", "issue:44898"], "result": {"analyst_result": {"best_issue_reason": "issue:43701 has the strongest triage signal among these unrelated issues (highest inbound references and a generic core-trainer symptom), making it the best representative issue for this mixed cluster.", "best_pr_reason": null, "canonical_issue_reason": "issue:43701 is the most broadly framed and most referenced item in the set, so it is the best single issue anchor if one 
must be chosen, but it is not actually a duplicate of the others.", "canonical_pr_reason": null, "confidence": 0.94, "soft_edge_verdicts": [{"accept": false, "left": "issue:43381", "reason": "Eval-mode gradient checkpointing behavior is unrelated to resume_from_checkpoint key mismatches; different code paths and failure modes.", "right": "issue:43701"}, {"accept": false, "left": "issue:44442", "reason": "Tokenizer loading failure and Perceiver interpolation/image-resolution failure are separate model/component bugs.", "right": "issue:44898"}, {"accept": false, "left": "issue:43232", "reason": "Generation kwargs sync logic and metrics label gathering are distinct subsystems with no shared concrete bug.", "right": "issue:43388"}, {"accept": false, "left": "issue:29942", "reason": "Flash Attention 2 test failures do not match a GenerationMixin import regression; one is backend testing, the other is API import resolution.", "right": "issue:36010"}, {"accept": false, "left": "issue:29127", "reason": "LayoutLMv3 box-validation messaging and CLIP bus errors on macOS are unrelated issues.", "right": "issue:33357"}, {"accept": false, "left": "issue:43526", "reason": "BeitImageProcessorFast label reduction and LwDetrImageLoss AMP/CUDA crashes affect different pipelines and code paths.", "right": "issue:44857"}, {"accept": false, "left": "issue:43550", "reason": "torch.compile/SDPA behavior in Bamba has nothing to do with checkpoint resume key mismatches.", "right": "issue:43701"}, {"accept": false, "left": "issue:43606", "reason": "CPU offload device mismatch in Bark is a runtime placement bug, not the same as checkpoint resumption key handling.", "right": "issue:43701"}, {"accept": false, "left": "issue:42915", "reason": "FineGrainedFP8 failure in Qwen3Moe and Qwen3-VL weight-shape mismatch are different model-loading/configuration problems.", "right": "issue:43931"}, {"accept": false, "left": "issue:29127", "reason": "LayoutLMv3 error messaging and embedding 
reinitialization after resizing token embeddings are unrelated bugs.", "right": "issue:35141"}, {"accept": false, "left": "issue:43526", "reason": "label reduction in an image processor and nonsensical SigLIP2 outputs with AutoModel/pipeline are not the same underlying defect.", "right": "issue:43994"}], "summary": "This cluster is not a duplicate set: the items span unrelated bugs across generation, image processing, model loading, training utilities, and hardware/runtime issues. All soft-similarity pairs should be rejected."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43381|issue:43701", "issue:44442|issue:44898", "issue:43232|issue:43388", "issue:29942|issue:36010", "issue:29127|issue:33357", "issue:43526|issue:44857", "issue:43550|issue:43701", "issue:43606|issue:43701", "issue:42915|issue:43931", "issue:29127|issue:35141", "issue:43526|issue:43994"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6534, "estimated_input_tokens": 3139, "item_count": 17, "node_count": 17, "serialized_chars": 12555, "soft_pair_count": 9}, "cached_at": "2026-04-14T12:52:24Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "d9a2785684e52b671d33be63be79c8a12387da1828cc3963c8451e65337bf104", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:43316", "issue:43525", "issue:43575", "issue:43646", "issue:43673", "issue:43701", "issue:43761", "issue:43931", "issue:43994", "issue:44077", "issue:44188", "issue:44206", "issue:44479", "issue:44743", "issue:44811", "issue:45216", "issue:45290"], "result": {"analyst_result": {"best_issue_reason": "issue:43646 is the best single issue to anchor the 
cluster because it is the most generic and cross-cutting, whereas the others are model- or feature-specific regressions.", "best_pr_reason": null, "canonical_issue_reason": "issue:43646 is the broadest representative: it describes a general Transformers 5.0.0 initialization regression, which is the only candidate that can plausibly subsume multiple nearby v5 breakages in this set.", "canonical_pr_reason": null, "confidence": 0.84, "soft_edge_verdicts": [{"accept": false, "left": "issue:44479", "reason": "Both are Qwen-related regressions, but one is a video-input path and the other is chat-template/tool-call handling; different failure modes and code paths.", "right": "issue:45290"}, {"accept": false, "left": "issue:43761", "reason": "CLIPVisionModel hidden_states regression and torch.compile attention-kernel divergence are separate bugs in different subsystems.", "right": "issue:44188"}, {"accept": false, "left": "issue:43316", "reason": "Both are config API inconsistencies, but one concerns Gemma3TextConfig fields and the other a missing Llama4Config attribute; not the same underlying defect.", "right": "issue:43525"}, {"accept": false, "left": "issue:44743", "reason": "Recurrent-state reset with cache is unrelated to Whisper processor batch_decode ignoring skip_special_tokens.", "right": "issue:44811"}, {"accept": false, "left": "issue:43994", "reason": "Siglip2 incorrect AutoModel/pipeline outputs and LasrFeatureExtractor center-arg crash are distinct vision/audio preprocessing issues.", "right": "issue:44206"}, {"accept": false, "left": "issue:43931", "reason": "One is a Qwen3-VL weight-shape load mismatch; the other is a save_pretrained regression for Qwen3.5 checkpoints.", "right": "issue:45216"}, {"accept": false, "left": "issue:43673", "reason": "Both mention cache behavior, but chunked_prefill cache handling and Qwen recurrent-state reset are different concrete bugs.", "right": "issue:44743"}, {"accept": false, "left": "issue:43646", "reason": "General 
custom model initialization breakage is not the same as patchtsmixer-specific post_init validation; too different to merge.", "right": "issue:44077"}, {"accept": false, "left": "issue:43575", "reason": "TP OOM while loading Qwen2-57B-A14B-Instruct and resume_from_checkpoint key mismatch are unrelated symptoms and workflows.", "right": "issue:43701"}], "summary": "These are mostly unrelated regression/issues across different models and code paths. The cluster is held together by broad transformer/v5 similarity, but the soft pairs do not look like true duplicates or mergeable PR-equivalents."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44479|issue:45290", "issue:43761|issue:44188", "issue:43316|issue:43525", "issue:44743|issue:44811", "issue:43994|issue:44206", "issue:43931|issue:45216", "issue:43673|issue:44743", "issue:43646|issue:44077", "issue:43575|issue:43701"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6788, "estimated_input_tokens": 3266, "item_count": 18, "node_count": 18, "serialized_chars": 13062, "soft_pair_count": 9}, "cached_at": "2026-04-14T12:52:44Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "ecf10e04cd50463cd54115353db8dfa30bfb4d8b2394e353428600fa0298de41", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:29942", "issue:33290", "issue:33666", "issue:34567", "issue:34689", "issue:39692", "issue:41628", "issue:43493", "issue:43575", "issue:43611", "issue:43653", "issue:43701", "issue:43827", "issue:44077", "issue:44206", "issue:44387", "issue:44479", "issue:45372"], "result": {"analyst_result": {"best_issue_reason": "No issue here is a strong 
cluster representative; the safest triage is to keep them separate rather than forcing a canonical issue.", "best_pr_reason": null, "canonical_issue_reason": "No single canonical issue stands out because the items are not one underlying bug; they span unrelated regressions across different features, models, and versions.", "canonical_pr_reason": null, "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "issue:41628", "reason": "Both are import errors, but they involve different missing symbols and different processor/loading paths; not the same bug.", "right": "issue:45372"}, {"accept": false, "left": "issue:43575", "reason": "Both mention OOM, but one is tensor-parallel model loading and the other is an int4 quantization memory regression; different failure modes.", "right": "issue:44387"}, {"accept": false, "left": "issue:29942", "reason": "Flash Attention test failures and Qwen2-VL multi-GPU training are unrelated problems.", "right": "issue:33666"}, {"accept": false, "left": "issue:43653", "reason": "Tokenizer special-token registration and resume-from-checkpoint key mismatches are distinct bugs with different code paths.", "right": "issue:43701"}, {"accept": false, "left": "issue:43493", "reason": "A SigLIP2 implementation discrepancy is unrelated to docs still referencing removed pipeline APIs.", "right": "issue:43827"}, {"accept": false, "left": "issue:33290", "reason": "Deepspeed/Adafactor OOM and TrainerState token-count updates are separate issues.", "right": "issue:34567"}, {"accept": false, "left": "issue:34689", "reason": "Both are model-loading failures, but they affect different releases and likely different loader logic; not one concrete defect.", "right": "issue:43611"}, {"accept": false, "left": "issue:44077", "reason": "PatchTSMixer post_init validation and Qwen video-input regression are unrelated changes.", "right": "issue:44479"}, {"accept": false, "left": "issue:39692", "reason": "A SigLIP2 docs/example problem and a 
feature-extractor arg regression do not look like the same underlying bug.", "right": "issue:44206"}], "summary": "This cluster is heterogeneous: the soft-linked issues only share broad surface themes like OOMs, import/load failures, or model regressions, but they point to different models, code paths, and failure modes. None of the candidate pairs looks like a true duplicate or mergeable PR-equivalent."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:41628|issue:45372", "issue:43575|issue:44387", "issue:29942|issue:33666", "issue:43653|issue:43701", "issue:43493|issue:43827", "issue:33290|issue:34567", "issue:34689|issue:43611", "issue:44077|issue:44479", "issue:39692|issue:44206"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6684, "estimated_input_tokens": 3214, "item_count": 17, "node_count": 17, "serialized_chars": 12854, "soft_pair_count": 10}, "cached_at": "2026-04-14T12:53:10Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "65acbeba0688cd40b0d559e3829dcfac908b4ad76c836c89fa0cc7be463d825a", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:33290", "issue:34689", "issue:35141", "issue:42175", "issue:43316", "issue:43381", "issue:43540", "issue:43873", "issue:44162", "issue:44190", "issue:44496", "issue:44514", "issue:44792", "issue:44855", "issue:44871", "issue:44877", "issue:45137"], "result": {"analyst_result": {"best_issue_reason": "#43873 is the most broadly scoped and currently open issue, so it is the least arbitrary representative of this heterogeneous set. 
It still should not be treated as a dedup target for the others.", "best_pr_reason": null, "canonical_issue_reason": "If a representative must be chosen, #43873 is the broadest runtime/infrastructure bug in the set and has the most signs of active user impact, but it is not a true duplicate of the others.", "canonical_pr_reason": null, "confidence": 0.95, "soft_edge_verdicts": [{"accept": false, "left": "issue:43381", "reason": "Different problems: gradient checkpointing in eval mode vs quantization/offloading behavior. Shared training/runtime wording is too broad to be a duplicate.", "right": "issue:43873"}, {"accept": false, "left": "issue:44190", "reason": "Different failure modes and code paths: local dataset loading in an example script vs unrecognized model/config loading for a specific checkpoint.", "right": "issue:44496"}, {"accept": false, "left": "issue:43873", "reason": "Offloading/quantization bug and Gemma-3 eos_token_id config mismatch are unrelated configuration/runtime issues.", "right": "issue:44871"}, {"accept": false, "left": "issue:44162", "reason": "ESM2 runtime breakage and a janus model test failure concern different models and likely different regressions.", "right": "issue:44792"}, {"accept": false, "left": "issue:44855", "reason": "Python 3.13 TorchScript parse/IndentationError in DebertaV2 is unrelated to the DeepSpeed ZeRO3 deque IndexError.", "right": "issue:45137"}, {"accept": false, "left": "issue:33290", "reason": "OOM with Adafactor in DeepSpeed is a training-memory issue; token embedding reinitialization is a model initialization bug.", "right": "issue:35141"}, {"accept": false, "left": "issue:43540", "reason": "Both are multimodal processor crashes, but they affect different model families and different APIs/code paths, so they are not the same bug.", "right": "issue:44514"}, {"accept": false, "left": "issue:33290", "reason": "Adafactor OOM during training and Llama 3.2 Vision model loading failure are distinct issues with no 
shared concrete bug.", "right": "issue:34689"}, {"accept": false, "left": "issue:43316", "reason": "Both are config-related, but one is an API discrepancy in Gemma3TextConfig while the other is strict config loading for granite_speech; not the same defect.", "right": "issue:44877"}, {"accept": false, "left": "issue:42175", "reason": "Packaging/backend omission for TensorFlow extras is unrelated to a Gemma3TextConfig API mismatch.", "right": "issue:43316"}], "summary": "This cluster is actually a grab bag of unrelated issue reports across loading, config, multimodal processing, DeepSpeed, and packaging. None of the soft-similarity pairs look like the same underlying bug or a mergeable duplicate PR-equivalent change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43381|issue:43873", "issue:44190|issue:44496", "issue:43873|issue:44871", "issue:44162|issue:44792", "issue:44855|issue:45137", "issue:33290|issue:35141", "issue:43540|issue:44514", "issue:33290|issue:34689", "issue:43316|issue:44877", "issue:42175|issue:43316"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6786, "estimated_input_tokens": 3265, "item_count": 18, "node_count": 18, "serialized_chars": 13057, "soft_pair_count": 9}, "cached_at": "2026-04-14T12:53:37Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "5fa41efc461b2278125160a49ac702aaf088d7e1b3022b366db2ae7ee3f2dea5", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:33290", "issue:34689", "issue:36010", "issue:36331", "issue:41950", "issue:43316", "issue:43381", "issue:43388", "issue:43404", "issue:43441", "issue:43475", "issue:43643", "issue:43688", 
"issue:43716", "issue:43749", "issue:43856", "issue:43931", "issue:44913"], "result": {"analyst_result": {"best_issue_reason": "issue 43643 is the cleanest, most broadly phrased bug report in the set (missing fields when using trust_remote_code on AutoConfig reload), but it is only a representative issue\u2014not a duplicate anchor for the others.", "best_pr_reason": null, "canonical_issue_reason": "No single canonical issue: these reports span unrelated failures (config reload, model loading, training/eval behavior, metrics, and model-specific regressions).", "canonical_pr_reason": null, "confidence": 0.92, "soft_edge_verdicts": [{"accept": false, "left": "issue:43749", "reason": "Both involve loading problems, but one is an FSDP RAM-efficient-loading regression and the other is a model weight-shape mismatch for Qwen3-VL; different failure modes and fixes.", "right": "issue:43931"}, {"accept": false, "left": "issue:43643", "reason": "Both are config/reload related, but one is about trust_remote_code dropping fields and the other is GPTNeoXConfig rotary_pct not persisting; not the same bug.", "right": "issue:44913"}, {"accept": false, "left": "issue:43388", "reason": "Metrics batching/label truncation is unrelated to auxiliary-loss normalization in MoE models.", "right": "issue:43688"}, {"accept": false, "left": "issue:43381", "reason": "Gradient checkpointing in eval mode and MoE training memory usage are distinct issues with different code paths.", "right": "issue:43856"}, {"accept": false, "left": "issue:43404", "reason": "Both mention Mistral/Ministral, but one is untied lm_head weights and the other is a FlashAttention failure; not the same underlying defect.", "right": "issue:43441"}, {"accept": false, "left": "issue:33290", "reason": "Adafactor+DeepSpeed OOM is unrelated to CustomTrainer.compute_loss receiving an unexpected keyword argument.", "right": "issue:36331"}, {"accept": false, "left": "issue:43316", "reason": "Gemma3TextConfig API inconsistency and 
Mistral-3 image-preprocessor dtype mismatch are unrelated.", "right": "issue:43716"}, {"accept": false, "left": "issue:41950", "reason": "Video-classification pipeline processor selection and SAM 3 Video missing fpn_position_embeddings are different bugs in different components.", "right": "issue:43475"}, {"accept": false, "left": "issue:34689", "reason": "Both are import/model-loading regressions, but one is a Llama 3.2 vision model loading breakage and the other is a GenerationMixin import failure; separate causes.", "right": "issue:36010"}], "summary": "The cluster is heterogeneous: all soft pairs look like separate bugs/regressions rather than duplicates, so none should be merged. The strongest theme is general loading/config issues, but each pair still targets a different code path or model family."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43749|issue:43931", "issue:43643|issue:44913", "issue:43388|issue:43688", "issue:43381|issue:43856", "issue:43404|issue:43441", "issue:33290|issue:36331", "issue:43316|issue:43716", "issue:41950|issue:43475", "issue:34689|issue:36010"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7002, "estimated_input_tokens": 3373, "item_count": 18, "node_count": 18, "serialized_chars": 13489, "soft_pair_count": 10}, "cached_at": "2026-04-14T12:53:51Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "27bc55f4a26987db38812256bd9e9e61a6e79ee82b7a7f2e9efe6fc708e6b781", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:16998", "issue:22355", "issue:25251", "issue:28282", "issue:43065", "issue:43295", "issue:43452", "issue:43761", "issue:43825", 
"issue:44077", "issue:44242", "issue:44479", "issue:44496", "issue:44534", "issue:44617", "issue:44871", "issue:45127", "issue:45200"], "result": {"analyst_result": {"best_issue_reason": "Issue 45200 is the strongest standalone issue for triage because it is open, specific, and describes an obvious defaulting fix path with clear downstream impact.", "best_pr_reason": null, "canonical_issue_reason": "Issue 45200 is the clearest current bug report: it has a concrete expected behavior change (default mm_token_type_ids to zeros) and a narrow, actionable scope.", "canonical_pr_reason": null, "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "issue:45127", "reason": "Different problems: LoRA merge/extended vocabulary collapse vs Gemma 4 text-only fine-tuning needing mm_token_type_ids defaults.", "right": "issue:45200"}, {"accept": false, "left": "issue:43065", "reason": "Same model family but different bugs: dummy Conv2d in Sam3PixelDecoder vs CUDA OOM in Sam3Video.", "right": "issue:44617"}, {"accept": false, "left": "issue:43761", "reason": "Unrelated regressions affecting different models and code paths: CLIPVisionModel hidden states vs video-input regression in Qwen VL variants.", "right": "issue:44479"}, {"accept": false, "left": "issue:16998", "reason": "Different failures entirely: model_max_length confusion for DeBERTa-V3 vs missing transformers.onnx module import.", "right": "issue:22355"}, {"accept": false, "left": "issue:25251", "reason": "Pipeline output shape bug vs missing PyTorch import error; no shared underlying defect.", "right": "issue:28282"}, {"accept": false, "left": "issue:43295", "reason": "Both are regressions, but one is about processor/tokenizer/image handling while the other is an error-message wording issue for translation tasks.", "right": "issue:43825"}, {"accept": false, "left": "issue:43295", "reason": "Different model/config problems: processor.tokenizer regression vs Gemma-3 eos_token_id inconsistency.", "right": 
"issue:44871"}, {"accept": false, "left": "issue:43452", "reason": "Both involve model loading, but one is gguf_file support in from_pretrained while the other is an unrecognized-model/config.json issue.", "right": "issue:44496"}, {"accept": false, "left": "issue:44077", "reason": "PatchTSMixer post_init gating and non-persistent buffer initialization are separate implementation bugs.", "right": "issue:44534"}, {"accept": false, "left": "issue:43761", "reason": "Different functional defects: CLIP hidden_states not returned vs missing load balancing loss when output_router_logits=False.", "right": "issue:44242"}], "summary": "All proposed soft pairs look like distinct issues, usually only sharing a broad subsystem or release/regression context. None look safe to treat as duplicate bugs or mergeable PR-equivalents."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:45127|issue:45200", "issue:43065|issue:44617", "issue:43761|issue:44479", "issue:16998|issue:22355", "issue:25251|issue:28282", "issue:43295|issue:43825", "issue:43295|issue:44871", "issue:43452|issue:44496", "issue:44077|issue:44534", "issue:43761|issue:44242"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6462, "estimated_input_tokens": 3103, "item_count": 17, "node_count": 17, "serialized_chars": 12412, "soft_pair_count": 9}, "cached_at": "2026-04-14T12:54:18Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "69c65f9084b50286445204a5dd5ffc461748b19c71adacb56ed087cab48078ba", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:42617", "issue:43065", "issue:43575", "issue:43646", "issue:43742", "issue:43906", 
"issue:43931", "issue:44060", "issue:44479", "issue:44521", "issue:44610", "issue:44734", "issue:44913", "issue:44933", "issue:45290", "issue:45362", "issue:45381"], "result": {"analyst_result": {"best_issue_reason": "issue:45290 is the best representative issue for this set because it is specific, actionable, and describes a concrete failure mode rather than a generic reproduction or broader symptom.", "best_pr_reason": null, "canonical_issue_reason": "issue:45290 is the clearest direct bug report: it names the failing API (`apply_chat_template(tokenize=True)`), the concrete trigger (assistant messages with tool calls and no content), and the crash. issue:43906 is explicitly an isolated reproduction of the same underlying report.", "canonical_pr_reason": null, "confidence": 0.72, "soft_edge_verdicts": [{"accept": true, "left": "issue:43906", "reason": "Same underlying chat-template bug: 43906 is an isolated reproduction of the exact failure described in 45290.", "right": "issue:45290"}, {"accept": false, "left": "issue:43646", "reason": "Different problems: one is a broad Transformers 5.0 custom-model init breakage, the other is a GPTNeoXConfig reload/default persistence bug.", "right": "issue:44913"}, {"accept": false, "left": "issue:43931", "reason": "Different model-loading issues: weight-shape mismatch for Qwen3-VL vs a tied-weights warning in Qwen3-Next.", "right": "issue:44060"}, {"accept": false, "left": "issue:44521", "reason": "Both involve multimodal chat/video, but the failures differ: assistant mask generation vs incorrect vision_position_ids.", "right": "issue:45381"}, {"accept": false, "left": "issue:44933", "reason": "Unrelated symptoms and likely different code paths: missing import from image_utils vs Qwen3.5 chat crash.", "right": "issue:45362"}, {"accept": false, "left": "issue:44479", "reason": "One is a video-input regression for several VL models; the other is a serve/KV-cache tensor-indexing crash.", "right": "issue:44734"}, {"accept": 
false, "left": "issue:42617", "reason": "No shared underlying bug: 3d_parallel.py execution issue vs tensor-parallel OOM while loading a Qwen2 model.", "right": "issue:43575"}, {"accept": false, "left": "issue:43575", "reason": "Both are loading-related, but one is OOM on a huge Qwen model and the other is a key error on MobileLLM-125M; not the same bug.", "right": "issue:43742"}, {"accept": false, "left": "issue:43065", "reason": "Different components and failures: dummy Sam3PixelDecoder Conv2d vs OmDet-Turbo processor/model input-size mismatch.", "right": "issue:44610"}], "summary": "Most items are unrelated standalone issues. The only strong duplicate/reproduction pair is the chat-template crash report and its isolated reproduction; the rest look like distinct bugs in different model families or subsystems."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43906|issue:45290", "issue:43646|issue:44913", "issue:43931|issue:44060", "issue:44521|issue:45381", "issue:44933|issue:45362", "issue:44479|issue:44734", "issue:42617|issue:43575", "issue:43575|issue:43742", "issue:43065|issue:44610"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6520, "estimated_input_tokens": 3132, "item_count": 17, "node_count": 17, "serialized_chars": 12527, "soft_pair_count": 9}, "cached_at": "2026-04-14T12:54:35Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "af221a55f47a9b8bc60853873375a986463fb4dc3cfa11136d2dad6e113b45fe", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:32090", "issue:35141", "issue:41720", "issue:43388", "issue:43575", "issue:43582", "issue:43650", "issue:43827", 
"issue:43994", "issue:44220", "issue:44464", "issue:44534", "issue:44792", "issue:44871", "issue:44913", "issue:44977", "issue:45245"], "result": {"analyst_result": {"best_issue_reason": "issue:43827 is the best representative only because it has the strongest linkage and a concrete, well-scoped problem statement; it is not a duplicate of the others.", "best_pr_reason": null, "canonical_issue_reason": "issue:43827 is the most established and externally linked item in the set, but the broader cluster is heterogeneous rather than a true duplicate group.", "canonical_pr_reason": null, "confidence": 0.95, "soft_edge_verdicts": [{"accept": false, "left": "issue:44792", "reason": "Different failures: a janus image-generation test case vs Qwen3.5 flash-attention generation behavior. Same broad area, but not the same bug.", "right": "issue:44977"}, {"accept": false, "left": "issue:41720", "reason": "Unrelated symptoms and likely unrelated code paths: Qwen3 auto device-mapping CUDA assert vs a category-count RuntimeError.", "right": "issue:45245"}, {"accept": false, "left": "issue:44464", "reason": "Chunked generation with compiled forward is a generation/compile bug; Gemma-3 eos_token_id inconsistency is a config mismatch. Not the same issue.", "right": "issue:44871"}, {"accept": false, "left": "issue:43827", "reason": "One is a docs cleanup after pipeline removals; the other is a model/pipeline correctness bug. 
Shared mention of pipeline is too broad.", "right": "issue:43994"}, {"accept": false, "left": "issue:32090", "reason": "Trainer NoneType GPU broadcast error and post_init embedding reinitialization are distinct training/model-initialization problems.", "right": "issue:35141"}, {"accept": false, "left": "issue:41720", "reason": "Both involve large-model loading, but one is a CUDA assert under auto device mapping and the other is an OOM under tensor parallelism; different concrete failures.", "right": "issue:43575"}, {"accept": false, "left": "issue:44534", "reason": "v5 non-persistent buffer junk and GPTNeoX rotary_pct reload regression are separate config/state persistence issues with different mechanics.", "right": "issue:44913"}, {"accept": false, "left": "issue:43388", "reason": "Metric gathering label truncation and Apple Silicon caching_allocator_warmup TypeError are unrelated subsystems and failure modes.", "right": "issue:43582"}, {"accept": false, "left": "issue:43650", "reason": "No clear common bug: an unclear 'ADD THE DATA' report vs a torch fbank feature extraction issue.", "right": "issue:44220"}], "summary": "These items do not form a duplicate cluster; they span many unrelated bugs across training, generation, config persistence, docs, and model-specific failures. 
No soft pair looks like the same underlying issue/change, so all are rejected."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44792|issue:44977", "issue:41720|issue:45245", "issue:44464|issue:44871", "issue:43827|issue:43994", "issue:32090|issue:35141", "issue:41720|issue:43575", "issue:44534|issue:44913", "issue:43388|issue:43582", "issue:43650|issue:44220"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6820, "estimated_input_tokens": 3282, "item_count": 17, "node_count": 17, "serialized_chars": 13127, "soft_pair_count": 10}, "cached_at": "2026-04-14T12:54:58Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "15c71b4cab4d830c53b6f9e248e5081ebac4081baaf8ecf10ba248ca94944c49", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:43116", "issue:43335", "issue:43540", "issue:43761", "issue:43976", "issue:44077", "issue:44079", "issue:44190", "issue:44355", "issue:44496", "issue:44734", "issue:44743", "issue:44855", "issue:44964", "issue:45127", "issue:45357", "issue:45372"], "result": {"analyst_result": {"best_issue_reason": "Issue 44855 is the strongest standalone representative: it is a concrete, reproducible regression with a clear root cause and broad relevance to Python 3.13/JIT parsing.", "best_pr_reason": null, "canonical_issue_reason": "No single canonical issue stands out: the items span distinct models, scripts, config bugs, and Python-version regressions, so choosing one as the cluster representative would be arbitrary.", "canonical_pr_reason": null, "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "issue:44190", "reason": "Different failure modes: 
local dataset loading in an example script vs loading a specific multimodal model under latest transformers.", "right": "issue:44964"}, {"accept": false, "left": "issue:45127", "reason": "One is about LoRA merging with extended vocab/tied embeddings; the other is about incorrect visual encoder keys in save_pretrained. Different bugs.", "right": "issue:45357"}, {"accept": false, "left": "issue:43761", "reason": "Both involve forward-pass state handling, but they affect different models and different mechanisms (CLIP hidden states vs Qwen recurrent state/cache reset).", "right": "issue:44743"}, {"accept": false, "left": "issue:44355", "reason": "General problems running compiled Python files are not the same as the specific DebertaV2/@torch.jit.script parsing regression on Python 3.13.", "right": "issue:44855"}, {"accept": false, "left": "issue:43335", "reason": "SwitchTransformers sparse-layer config bug is unrelated to the Python 3.13 IndentationError/JIT parsing issue.", "right": "issue:44855"}, {"accept": false, "left": "issue:43976", "reason": "A packaging/Python-version incompatibility is not the same as the specific decorator/comment parsing failure in DebertaV2Model.", "right": "issue:44855"}, {"accept": false, "left": "issue:44496", "reason": "Both are model-loading errors, but they stem from different causes and different model families (missing model_type vs mistral_common import breakage).", "right": "issue:45372"}, {"accept": false, "left": "issue:43540", "reason": "Video input processing in Qwen3OmniMoe is unrelated to KV-cache continuation tensor indexing in transformers serve.", "right": "issue:44734"}, {"accept": false, "left": "issue:44077", "reason": "These are separate core-library bugs: PatchTSMixer post_init validation vs ModelOutput key assignment behavior.", "right": "issue:44079"}, {"accept": false, "left": "issue:43116", "reason": "Both mention example scripts, but the underlying problems are different tasks and code paths: multi-label 
classification output vs local dataset loading for image classification.", "right": "issue:44190"}], "summary": "This is a heterogeneous set of unrelated bug reports, not a duplicate-heavy cluster. None of the soft pairs appear to describe the same underlying issue or change, so all should be rejected."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44190|issue:44964", "issue:45127|issue:45357", "issue:43761|issue:44743", "issue:44355|issue:44855", "issue:43335|issue:44855", "issue:43976|issue:44855", "issue:44496|issue:45372", "issue:43540|issue:44734", "issue:44077|issue:44079", "issue:43116|issue:44190"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7030, "estimated_input_tokens": 3387, "item_count": 18, "node_count": 18, "serialized_chars": 13548, "soft_pair_count": 10}, "cached_at": "2026-04-14T12:55:09Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "ae3842bbc209c46f56a678644c720e92e32129ff22505e07841d81c48ccc45c0", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:36246", "issue:38175", "issue:38617", "issue:39692", "issue:41628", "issue:41950", "issue:42175", "issue:43065", "issue:43295", "issue:43335", "issue:43404", "issue:43644", "issue:44016", "issue:44190", "issue:44279", "issue:44913", "issue:45216", "issue:45276"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "issue:43644", "reason": "Both are config/state-related, but one reports junk-filled non-persistent buffers on Transformers 5.0.0 while 
the other is GPTNeoX rotary_pct not surviving reload. Different code paths and symptoms.", "right": "issue:44913"}, {"accept": false, "left": "issue:36246", "reason": "Both are import failures, but they involve different missing symbols from different modules: Qwen2_5_VLImageProcessor vs AutoImageProcessor.", "right": "issue:41628"}, {"accept": false, "left": "issue:41950", "reason": "A video-classification pipeline bug looking for image processors is unrelated to a notebook syntax error in the \u201cTransformers, what can they do?\u201d tutorial.", "right": "issue:44016"}, {"accept": false, "left": "issue:43404", "reason": "Different model families and failure modes: Mistral3 lm_head tying / image-text generation vs Qwen3.5 save_pretrained checkpoint corruption.", "right": "issue:45216"}, {"accept": false, "left": "issue:43295", "reason": "These affect different APIs: processor.tokenizer/image handling regression vs gemma4 resize_token_embeddings not propagating to embed/output embeddings.", "right": "issue:45276"}, {"accept": false, "left": "issue:36246", "reason": "Separate import errors from different modules and missing names; no shared code-path bug.", "right": "issue:38617"}, {"accept": false, "left": "issue:39692", "reason": "SigLIP2 documentation example problems are distinct from local dataset loading failures in run_image_classification_no_trainer.py.", "right": "issue:44190"}, {"accept": false, "left": "issue:43335", "reason": "SwitchTransformers sparse-layer configuration with num_sparse_encoder_layers=0 is unrelated to loading a local dataset in an image-classification training script.", "right": "issue:44190"}, {"accept": false, "left": "issue:42175", "reason": "One is about tensorflow not being installed with the torch extra; the other is a generic dependency issue without the same concrete packaging defect.", "right": "issue:44279"}, {"accept": false, "left": "issue:38175", "reason": "Different model bugs: unexpected zero probabilities in SigLIP2 
versus a dummy Conv2d in Sam3PixelDecoder.", "right": "issue:43065"}], "summary": "This cluster is heterogeneous: it mixes unrelated import errors, model-specific regressions, docs/notebook issues, pipeline behavior, and config/save/load bugs. None of the soft pairs look like the same underlying defect or change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43644|issue:44913", "issue:36246|issue:41628", "issue:41950|issue:44016", "issue:43404|issue:45216", "issue:43295|issue:45276", "issue:36246|issue:38617", "issue:39692|issue:44190", "issue:43335|issue:44190", "issue:42175|issue:44279", "issue:38175|issue:43065"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6514, "estimated_input_tokens": 3129, "item_count": 17, "node_count": 17, "serialized_chars": 12513, "soft_pair_count": 9}, "cached_at": "2026-04-14T12:55:30Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "27e1d5faade20bdeefb0c13fd90e47a316c4e529bb123cc8cd9bf7d15ac530c8", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:42907", "issue:43441", "issue:43479", "issue:43854", "issue:44016", "issue:44112", "issue:44186", "issue:44220", "issue:44246", "issue:44466", "issue:44734", "issue:44792", "issue:44871", "issue:44913", "issue:45003", "issue:45245", "issue:45375"], "result": {"analyst_result": {"best_issue_reason": "No global best issue is suitable as a duplicate target because there is no shared concrete bug underlying the set.", "best_pr_reason": null, "canonical_issue_reason": "No single canonical issue fits this cluster: the items span distinct failures (serialization, tokenizer crashes, CI test flakes, 
config validation, import behavior, and notebook syntax).", "canonical_pr_reason": null, "confidence": 0.95, "soft_edge_verdicts": [{"accept": false, "left": "issue:44871", "reason": "Both are config/validation themed, but Gemma eos_token_id mismatch and the 2^24 category limit are different bugs with different codepaths.", "right": "issue:45245"}, {"accept": false, "left": "issue:44186", "reason": "Tokenizer crashes on NER/padding versus serve KV-cache continuation indexing errors are unrelated failures in different subsystems.", "right": "issue:44734"}, {"accept": false, "left": "issue:43479", "reason": "Both concern model config fields, but one is None-default initialization and the other is strict-dropping of a missing field; different concrete issues.", "right": "issue:45375"}, {"accept": false, "left": "issue:44016", "reason": "A notebook syntax error and a feature-extraction runtime issue are plainly unrelated.", "right": "issue:44220"}, {"accept": false, "left": "issue:44246", "reason": "Import slowness and unsafe sys.modules access may be adjacent, but they are not the same underlying bug or fix.", "right": "issue:45003"}, {"accept": false, "left": "issue:44466", "reason": "Both involve persistence/reload behavior, but lm_head.weight serialization and rotary_pct resetting are different model state bugs.", "right": "issue:44913"}, {"accept": false, "left": "issue:44112", "reason": "These are separate CI/test failures in different models with no shared root cause.", "right": "issue:44792"}, {"accept": false, "left": "issue:42907", "reason": "Both mention Ministral, but saving dequantized weights and FlashAttention failure are distinct codepaths and bugs.", "right": "issue:43441"}, {"accept": false, "left": "issue:43854", "reason": "Different models and different test failures; no evidence of the same bug or change.", "right": "issue:44792"}], "summary": "This cluster is a loose collection of unrelated bug reports across different models, configs, tests, and 
runtime paths. The soft-similarity links are mostly thematic rather than true duplicates, so none should be merged."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44871|issue:45245", "issue:44186|issue:44734", "issue:43479|issue:45375", "issue:44016|issue:44220", "issue:44246|issue:45003", "issue:44466|issue:44913", "issue:44112|issue:44792", "issue:42907|issue:43441", "issue:43854|issue:44792"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6996, "estimated_input_tokens": 3370, "item_count": 18, "node_count": 18, "serialized_chars": 13478, "soft_pair_count": 10}, "cached_at": "2026-04-14T12:55:45Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "4dedd0e08432fbf7c035f9dda2f90f063a54cb6d57b7d0427ff5a261ee63e7a2", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:43295", "issue:43299", "issue:43643", "issue:43673", "issue:43824", "issue:43976", "issue:44016", "issue:44062", "issue:44112", "issue:44279", "issue:44336", "issue:44496", "issue:44933", "issue:44964", "issue:44987", "issue:45216", "issue:45335", "issue:45357"], "result": {"analyst_result": {"best_issue_reason": "Issue 43295 is the most generally representative of a core library regression affecting downstream model code, but none of the issues cleanly subsume the others.", "best_pr_reason": null, "canonical_issue_reason": "Issue 43295 is the broadest user-facing regression in the set, centered on a shared multimodal processor/tokenizer API breakage. 
That said, the overall cluster is heterogeneous, so it is only a weak canonical choice.", "canonical_pr_reason": null, "confidence": 0.91, "soft_edge_verdicts": [{"accept": false, "left": "issue:43673", "reason": "Different failures: generation cache/chunked prefill vs ANSI output in loading_report. No shared code path or concrete duplicate behavior.", "right": "issue:44336"}, {"accept": false, "left": "issue:43299", "reason": "Different model families and bugs: Qwen3VL MoE loading vs Qwen3.5 save_pretrained checkpoint contents.", "right": "issue:45216"}, {"accept": false, "left": "issue:43295", "reason": "Processor/tokenizer image-handling regression is unrelated to t5gemma resize_token_embeddings not updating decoder.embed_tokens.", "right": "issue:45335"}, {"accept": false, "left": "issue:44016", "reason": "Notebook syntax error and AddedToken keyword collision are unrelated bug classes.", "right": "issue:44062"}, {"accept": false, "left": "issue:43643", "reason": "Both involve config loading, but one is missing fields with trust_remote_code and the other is an unrecognized model/model_type error. 
Different root causes.", "right": "issue:44496"}, {"accept": false, "left": "issue:43824", "reason": "Both are import errors, but they refer to different symbols/modules and different missing-export problems.", "right": "issue:44933"}, {"accept": false, "left": "issue:43976", "reason": "Python version support incompatibility is not the same bug as failing to load a specific model.", "right": "issue:44987"}, {"accept": false, "left": "issue:43976", "reason": "A vague dependency issue is not enough to match a specific Python-version compatibility regression.", "right": "issue:44279"}, {"accept": false, "left": "issue:44112", "reason": "CI test flakiness in GraniteSpeech is unrelated to model loading failure for Phi-4 multimodal.", "right": "issue:44964"}, {"accept": false, "left": "issue:43299", "reason": "Different Qwen variants and different code paths: model loading vs save_pretrained visual encoder key serialization.", "right": "issue:45357"}], "summary": "These issues span many unrelated Transformers regressions, import/load failures, and CI/docs problems. 
There is no clear duplicate cluster here; all soft pairs should be rejected as different underlying bugs or changes."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43673|issue:44336", "issue:43299|issue:45216", "issue:43295|issue:45335", "issue:44016|issue:44062", "issue:43643|issue:44496", "issue:43824|issue:44933", "issue:43976|issue:44987", "issue:43976|issue:44279", "issue:44112|issue:44964", "issue:43299|issue:45357"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7240, "estimated_input_tokens": 3492, "item_count": 18, "node_count": 18, "serialized_chars": 13968, "soft_pair_count": 11}, "cached_at": "2026-04-14T12:56:06Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "93b05bb3a8b4e914fb5d894cfe6b8d1bf5e7dfe24ee73f167e050a458656b95a", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:43526", "issue:43577", "issue:43901", "issue:44075", "issue:44242", "issue:44297", "issue:44351", "issue:44368", "issue:44462", "issue:44496", "issue:44521", "issue:44779", "issue:45042", "issue:45081", "issue:45200", "issue:45216", "issue:45278", "issue:45325"], "result": {"analyst_result": {"best_issue_reason": "issue:44242 is the most established and actionable issue here; the others are mostly separate regressions or unrelated feature/documentation problems.", "best_pr_reason": null, "canonical_issue_reason": "issue:44242 has the strongest signal in the set (highest discussion and inbound references) and is a concrete, well-scoped bug report, so it is the best single issue to represent this cluster if one must be chosen.", "canonical_pr_reason": null, "confidence": 0.92, 
"soft_edge_verdicts": [{"accept": false, "left": "issue:44075", "reason": "Different bugs: one is about SGD optimizer arguments not being applied, the other about a Qwen tie_word_embeddings warning during LoRA fine-tuning.", "right": "issue:44368"}, {"accept": false, "left": "issue:43526", "reason": "One is an image processor label-reduction bug; the other is a pipeline docs/behavior mismatch about return_all_scores.", "right": "issue:43901"}, {"accept": false, "left": "issue:44496", "reason": "Different code paths: model config recognition vs PIL image processor dependency handling.", "right": "issue:45042"}, {"accept": false, "left": "issue:44462", "reason": "Both involve tokenizers, but one is AutoTokenizer ignoring tokenizer.json and the other is a Mistral regex patch crash; not the same underlying defect.", "right": "issue:45081"}, {"accept": false, "left": "issue:44521", "reason": "Different multimodal bugs: assistant mask generation vs rope index / temporal position ID scaling.", "right": "issue:45325"}, {"accept": false, "left": "issue:44242", "reason": "Different model families and failures: MoE load-balancing loss handling vs Gemma 4 mm_token_type_ids defaults.", "right": "issue:45200"}, {"accept": false, "left": "issue:43577", "reason": "Different regressions: BLIP2 dtype propagation vs Qwen3.5 checkpoint save_pretrained correctness.", "right": "issue:45216"}, {"accept": false, "left": "issue:43901", "reason": "Docs/behavior mismatch for text classification vs an unrecognized-model config error; unrelated bugs.", "right": "issue:44496"}, {"accept": false, "left": "issue:44351", "reason": "The second is a broad import-error report; it is not the same concrete missing HybridCache symbol bug.", "right": "issue:45278"}, {"accept": false, "left": "issue:44297", "reason": "Related tokenizer area, but saving the wrong tokenizer_class and ignoring tokenizer.json are distinct problems.", "right": "issue:44462"}, {"accept": false, "left": "issue:44462", "reason": 
"Both are tokenizer regressions, but they affect different tokenizers and different failure modes; not mergeable as one bug.", "right": "issue:44779"}], "summary": "All suggested soft pairs look like false positives: they share only broad subsystem similarity (tokenizers, multimodal, loading, etc.) but not the same concrete bug or fix path. No PRs are present."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44075|issue:44368", "issue:43526|issue:43901", "issue:44496|issue:45042", "issue:44462|issue:45081", "issue:44521|issue:45325", "issue:44242|issue:45200", "issue:43577|issue:45216", "issue:43901|issue:44496", "issue:44351|issue:45278", "issue:44297|issue:44462", "issue:44462|issue:44779"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6584, "estimated_input_tokens": 3164, "item_count": 17, "node_count": 17, "serialized_chars": 12656, "soft_pair_count": 10}, "cached_at": "2026-04-14T12:56:20Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "e66414199379735595461c82956898fbd9e2b2d574481444f57785a5ac0ed21d", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:38175", "issue:41950", "issue:43299", "issue:43479", "issue:43525", "issue:43650", "issue:43854", "issue:43976", "issue:44279", "issue:44479", "issue:44496", "issue:44617", "issue:44877", "issue:44964", "issue:45020", "issue:45245", "issue:45405"], "result": {"analyst_result": {"best_issue_reason": "Issue 45020 is the broadest representative of the cluster because it covers the general class of recent model-loading breakages with `remote_code`, but it is not a confirmed duplicate of the others.", "best_pr_reason": null, 
"canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.95, "soft_edge_verdicts": [{"accept": false, "left": "issue:43854", "reason": "Both are model-loading failures, but they affect different models and present different breakages; not the same bug.", "right": "issue:44964"}, {"accept": false, "left": "issue:44279", "reason": "One is a vague dependency problem, the other is a specific `MIN_PEFT_VERSION` bump issue; not the same underlying change.", "right": "issue:45405"}, {"accept": false, "left": "issue:43976", "reason": "Python version compatibility failure vs unreleased PEFT minimum version bump; unrelated.", "right": "issue:45405"}, {"accept": false, "left": "issue:44479", "reason": "Video-input regression in VL models vs a category cardinality runtime error; different code paths and symptoms.", "right": "issue:45245"}, {"accept": false, "left": "issue:41950", "reason": "Pipeline processor lookup bug vs an unrelated data-related issue; no shared failure mode.", "right": "issue:43650"}, {"accept": false, "left": "issue:41950", "reason": "Both mention pipeline/model loading regressions, but one is specifically video-classification processor resolution and the other is broad `remote_code` breakage; too different.", "right": "issue:45020"}, {"accept": false, "left": "issue:43479", "reason": "Different multimodal config bugs in different models; not the same config initialization problem.", "right": "issue:43525"}, {"accept": false, "left": "issue:44496", "reason": "Both are config/model loading errors, but the affected models and root causes differ; too broad to merge.", "right": "issue:44877"}, {"accept": false, "left": "issue:38175", "reason": "Zero probabilities in a SigLIP model vs CUDA OOM in Sam3Video; clearly unrelated.", "right": "issue:44617"}, {"accept": false, "left": "issue:43299", "reason": "Different model families and failure modes; no evidence of a shared underlying defect.", "right": "issue:43854"}], "summary": "This cluster 
is mostly a mix of unrelated Transformers issues around model loading, config handling, version/dependency mismatches, and a few video/VL regressions. None of the soft pairs look like true duplicates; they mostly share broad subsystem vocabulary rather than the same underlying bug."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43854|issue:44964", "issue:44279|issue:45405", "issue:43976|issue:45405", "issue:44479|issue:45245", "issue:41950|issue:43650", "issue:41950|issue:45020", "issue:43479|issue:43525", "issue:44496|issue:44877", "issue:38175|issue:44617", "issue:43299|issue:43854"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6758, "estimated_input_tokens": 3251, "item_count": 18, "node_count": 18, "serialized_chars": 13001, "soft_pair_count": 9}, "cached_at": "2026-04-14T12:56:35Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "e796a8607086aa8beae4d6e7a1cdb6716d7d2409bae4e7b954ab83a941c81109", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:39692", "issue:42994", "issue:43295", "issue:43317", "issue:43441", "issue:43450", "issue:43550", "issue:43650", "issue:43723", "issue:43901", "issue:44016", "issue:44162", "issue:44279", "issue:44291", "issue:44683", "issue:44779", "issue:44964", "issue:45290"], "result": {"analyst_result": {"best_issue_reason": "None of the issues is a strong global representative because the cluster lacks one underlying bug or change.", "best_pr_reason": null, "canonical_issue_reason": "No single canonical issue fits: the items describe different failure modes across unrelated code paths and documentation areas.", "canonical_pr_reason": 
null, "confidence": 0.95, "soft_edge_verdicts": [{"accept": false, "left": "issue:43295", "reason": "Both mention tokenizer regressions in v5, but one is about processor.tokenizer / image handling while the other is Deepseek tokenization output; different bugs and code paths.", "right": "issue:44779"}, {"accept": false, "left": "issue:43441", "reason": "Different attention backends and failures: Ministral-3 FlashAttention breakage vs compiled flex_attention failing on torch >= 2.9.", "right": "issue:44683"}, {"accept": false, "left": "issue:39692", "reason": "Both are documentation-related, but one is a SigLIP2 example with model/processor mismatch plus quantization failure, while the other is a pipeline doc note about return_all_scores.", "right": "issue:43901"}, {"accept": false, "left": "issue:43450", "reason": "Video processor batched-shape bug is unrelated to a generic transformers dependency issue.", "right": "issue:44279"}, {"accept": false, "left": "issue:43723", "reason": "Tokenizer loading in v5 and an ESM2 model regression are distinct problems with different symptoms and likely fixes.", "right": "issue:44162"}, {"accept": false, "left": "issue:43650", "reason": "The titles describe completely different topics: a vague data request vs a chat template crash with tool calls.", "right": "issue:45290"}, {"accept": false, "left": "issue:42994", "reason": "Both involve quantized/dequantized model workflows, but one is saving failure and the other is device_map=auto loading/offload failure; not the same bug.", "right": "issue:43317"}, {"accept": false, "left": "issue:44016", "reason": "A notebook syntax error is unrelated to a TypeError from init_empty_weights and an unexpected _is_hf_initialized argument.", "right": "issue:44291"}, {"accept": false, "left": "issue:43550", "reason": "Different model families and failure modes: Bamba-9B-v2 compile/SDPA issue vs Phi-4 multimodal model loading failure.", "right": "issue:44964"}], "summary": "This cluster is 
heterogeneous and does not form a true duplicate set. The issues span unrelated bugs in tokenizers, quantization/loading, attention backends, notebooks, processors, and docs, so all soft links should be rejected."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43295|issue:44779", "issue:43441|issue:44683", "issue:39692|issue:43901", "issue:43450|issue:44279", "issue:43723|issue:44162", "issue:43650|issue:45290", "issue:42994|issue:43317", "issue:44016|issue:44291", "issue:43550|issue:44964"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6814, "estimated_input_tokens": 3279, "item_count": 18, "node_count": 18, "serialized_chars": 13115, "soft_pair_count": 9}, "cached_at": "2026-04-14T12:56:56Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "37e1ecd87173a8e3b5eb20ac417c1a30a0ee448c2f9ecf1aedc74f61ce3a9f8b", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:43065", "issue:43122", "issue:43262", "issue:43352", "issue:43650", "issue:43673", "issue:43761", "issue:44261", "issue:44263", "issue:44466", "issue:44521", "issue:44671", "issue:44779", "issue:44898", "issue:45071", "issue:45250", "issue:45276", "issue:45372"], "result": {"analyst_result": {"best_issue_reason": "None of the issues is a good global anchor for the cluster because the similarities are superficial rather than duplicate-level.", "best_pr_reason": null, "canonical_issue_reason": "No single canonical issue fits this set; the items cover unrelated regressions across tokenization, audio chat templates, vision models, config typing, serialization, and unsupported-feature errors.", "canonical_pr_reason": null, 
"confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "issue:44261", "reason": "Different bugs in different code paths: MLA layernorm precision/config vs a torch.split return-value issue in a MoE indexer.", "right": "issue:44263"}, {"accept": false, "left": "issue:43065", "reason": "Both are vision-related, but one is about a dummy Conv2d in Sam3PixelDecoder and the other about Perceiver interpolation at non-default resolution; not the same defect.", "right": "issue:44898"}, {"accept": false, "left": "issue:43262", "reason": "Same high-level API name, but one is audio sampling-rate defaulting and the other is assistant-mask generation for multimodal templates; distinct failures.", "right": "issue:44521"}, {"accept": false, "left": "issue:44466", "reason": "Both involve embeddings/lm_head behavior, but one is tied-weight serialization and the other is resize_token_embeddings not updating Gemma 4 outputs; different fixes.", "right": "issue:45276"}, {"accept": false, "left": "issue:44671", "reason": "One is a CamemBERT masked-LM regression, the other a Deepseek tokenizer regression; different components and symptoms.", "right": "issue:44779"}, {"accept": false, "left": "issue:43122", "reason": "The second issue is just a nontechnical placeholder title and does not describe the same tokenization bug.", "right": "issue:43650"}, {"accept": false, "left": "issue:43673", "reason": "Unrelated regressions: chunked-prefill cache handling in generation vs CLIPVisionModel hidden_states output.", "right": "issue:43761"}, {"accept": false, "left": "issue:45071", "reason": "Different breakages: PretrainedConfig type checking versus Gemma 4 processor loading failing on a missing mistral_common import.", "right": "issue:45372"}, {"accept": false, "left": "issue:43352", "reason": "Same Flash Attention theme, but one is a model-specific unsupported-error report and the other is a general Flash Attention 2.0 issue; not the same underlying bug.", "right": "issue:45250"}], 
"summary": "This cluster is heterogeneous: the soft pairs share only broad themes like a model family, tokenizer, or API name, but not the same concrete bug or fix. All soft edges look like false positives and should stay separate."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44261|issue:44263", "issue:43065|issue:44898", "issue:43262|issue:44521", "issue:44466|issue:45276", "issue:44671|issue:44779", "issue:43122|issue:43650", "issue:43673|issue:43761", "issue:45071|issue:45372", "issue:43352|issue:45250"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6606, "estimated_input_tokens": 3175, "item_count": 17, "node_count": 17, "serialized_chars": 12700, "soft_pair_count": 10}, "cached_at": "2026-04-14T12:57:17Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "ef40098a4b35763be962a0c7c87883147cf81d5325a2087bd7e03ff013d76174", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:42175", "issue:43295", "issue:43475", "issue:43525", "issue:43824", "issue:43881", "issue:43901", "issue:43976", "issue:43994", "issue:44016", "issue:44246", "issue:44589", "issue:44617", "issue:44623", "issue:44749", "issue:45362", "issue:45397"], "result": {"analyst_result": {"best_issue_reason": "No single issue cleanly represents a duplicate cluster here, but issue #44623 is the clearest standalone bug report because it describes a concrete, reproducible missing-files problem with `processor.save_pretrained(...)`.", "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "issue:42175", "reason": 
"Different problems: missing TensorFlow backend extra on install vs Python-version compatibility regression. No shared code path or fix.", "right": "issue:43976"}, {"accept": false, "left": "issue:44623", "reason": "`save_pretrained` missing files is unrelated to a Chinese performance regression during filtering. Different symptoms and root causes.", "right": "issue:44749"}, {"accept": false, "left": "issue:43901", "reason": "Docs mismatch for `return_all_scores` is not the same bug as incorrect SigLIP2 model/pipeline outputs.", "right": "issue:43994"}, {"accept": false, "left": "issue:44623", "reason": "Both involve loading/saving flows, but one is missing processor files and the other is a specific `from_pretrained` failure with Gemma-4/Zero3. Not the same bug.", "right": "issue:45397"}, {"accept": false, "left": "issue:43824", "reason": "Import failure for `Qwen2_5_VLForConditionalGeneration` is unrelated to a Python 3.9/3.10 compatibility issue.", "right": "issue:43976"}, {"accept": false, "left": "issue:43295", "reason": "Processor/tokenizer regression and SAM3Video CUDA OOM are different failures with different code paths.", "right": "issue:44617"}, {"accept": false, "left": "issue:43475", "reason": "Missing `fpn_position_embeddings` in SAM3 video output is unrelated to a notebook syntax error.", "right": "issue:44016"}, {"accept": false, "left": "issue:44246", "reason": "Slow `import transformers` is a separate startup/performance issue, not the same as SAM3Video memory exhaustion.", "right": "issue:44617"}, {"accept": false, "left": "issue:43525", "reason": "A missing `pad_token_id` attribute in `Llama4Config` is a different concrete bug from a GLM-4V-9B loading failure.", "right": "issue:43881"}, {"accept": false, "left": "issue:44589", "reason": "Float8 storage lookup failure and Qwen chat crash are distinct runtime errors with different causes and fixes.", "right": "issue:45362"}], "summary": "These items are only superficially similar; the soft pairs 
span unrelated bugs such as dependency/install issues, model import/load failures, docs mismatches, performance regressions, and runtime errors. I do not see any pair that clearly describes the same underlying bug or change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:42175|issue:43976", "issue:44623|issue:44749", "issue:43901|issue:43994", "issue:44623|issue:45397", "issue:43824|issue:43976", "issue:43295|issue:44617", "issue:43475|issue:44016", "issue:44246|issue:44617", "issue:43525|issue:43881", "issue:44589|issue:45362"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6942, "estimated_input_tokens": 3343, "item_count": 18, "node_count": 18, "serialized_chars": 13370, "soft_pair_count": 10}, "cached_at": "2026-04-14T12:57:47Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "5a2b73a85e2a2379aa67d236f84ee1143a63568f95a3bd8d28ddb620b0571341", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:41628", "issue:41950", "issue:42491", "issue:43299", "issue:43901", "issue:43976", "issue:44188", "issue:44220", "issue:44246", "issue:44297", "issue:44336", "issue:44361", "issue:44373", "issue:44462", "issue:44496", "issue:44749", "issue:45356", "issue:45397"], "result": {"analyst_result": {"best_issue_reason": "No single issue is a safe global representative; the items are too unrelated to treat any one as a canonical duplicate target.", "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "issue:44188", "reason": "Attention-kernel divergence under torch.compile is unrelated to 
ANSI codes emitted by loading_report.", "right": "issue:44336"}, {"accept": false, "left": "issue:43901", "reason": "Docs for return_all_scores vs slow imports are different problems and different code paths.", "right": "issue:44246"}, {"accept": false, "left": "issue:41628", "reason": "Missing AutoImageProcessor import and Python-version compatibility are unrelated issues.", "right": "issue:43976"}, {"accept": false, "left": "issue:44462", "reason": "Both concern tokenizers, but they affect different models and different failure modes; not the same bug.", "right": "issue:45356"}, {"accept": false, "left": "issue:41950", "reason": "Video-classification pipeline choosing image processors is unrelated to _torch_extract_fbank_features() failures.", "right": "issue:44220"}, {"accept": false, "left": "issue:44297", "reason": "Tokenizer save_pretrained metadata mismatch and gemma-4 zero3 from_pretrained are separate loading/serialization problems.", "right": "issue:45397"}, {"accept": false, "left": "issue:44336", "reason": "ANSI code emission in loading_report is unrelated to a wrong docstring for position_ids.", "right": "issue:44373"}, {"accept": false, "left": "issue:44361", "reason": "MLukeTokenizer AttributeError is a functional tokenizer bug, while 44749 reports a performance regression during filtering.", "right": "issue:44749"}, {"accept": false, "left": "issue:42491", "reason": "These both mention Qwen3 MoE loading, but one is LoRA compatibility on hf5.x and the other is Qwen3VL-MoE loading; not clearly the same concrete defect.", "right": "issue:43299"}, {"accept": false, "left": "issue:44246", "reason": "Slow import timing and unrecognized model/config.json handling are distinct issues.", "right": "issue:44496"}], "summary": "This cluster is heterogeneous: it mixes unrelated Transformer issues about pipelines, tokenizers, loading/version compatibility, docs, performance, and serialization. 
None of the soft pairs looks like the same underlying bug or change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44188|issue:44336", "issue:43901|issue:44246", "issue:41628|issue:43976", "issue:44462|issue:45356", "issue:41950|issue:44220", "issue:44297|issue:45397", "issue:44336|issue:44373", "issue:44361|issue:44749", "issue:42491|issue:43299", "issue:44246|issue:44496"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6526, "estimated_input_tokens": 3135, "item_count": 17, "node_count": 17, "serialized_chars": 12538, "soft_pair_count": 9}, "cached_at": "2026-04-14T12:58:00Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "87abeed49c3ad1e3168de791c1779f1ed421f27d8992447a7bb6ad24ee60c0be", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:36246", "issue:43526", "issue:43531", "issue:43673", "issue:43994", "issue:44242", "issue:44291", "issue:44295", "issue:44336", "issue:44351", "issue:44485", "issue:44561", "issue:44857", "issue:45003", "issue:45276", "issue:45362", "issue:45397"], "result": {"analyst_result": {"best_issue_reason": "Issue 45003 is the best representative only as a broad, active core issue; it is open, discussion-heavy, and affects shared modeling_utils behavior rather than a narrow model-specific edge case.", "best_pr_reason": null, "canonical_issue_reason": "No clear canonical issue: the set is heterogeneous and not one underlying defect. 
If an anchor is needed, issue 45003 is the broadest core-runtime bug in the group.", "canonical_pr_reason": null, "confidence": 0.98, "soft_edge_verdicts": [{"accept": false, "left": "issue:36246", "reason": "Both are import errors, but for different missing symbols in different modules (`Qwen2_5_VLImageProcessor` vs `HybridCache`); these are separate export issues.", "right": "issue:44351"}, {"accept": false, "left": "issue:43531", "reason": "Both mention Qwen models, but one is a sliding_window/MoE behavior bug and the other is a chat-related crash for Qwen3.5-35B; different code paths and symptoms.", "right": "issue:45362"}, {"accept": false, "left": "issue:44295", "reason": "One is a `position_ids` buffer access error, the other is a Gemma-4 zero3 `from_pretrained` problem; no shared underlying failure mode.", "right": "issue:45397"}, {"accept": false, "left": "issue:43526", "reason": "`reduce_labels` in BEiT image processing is unrelated to unsafe `sys.modules` access in `modeling_utils`; different subsystem and bug class.", "right": "issue:45003"}, {"accept": false, "left": "issue:44336", "reason": "ANSI color output in loading_report and GLM-5 RoPE implementation are unrelated; no shared code path.", "right": "issue:44485"}, {"accept": false, "left": "issue:43994", "reason": "Different models and different behaviors: SigLIP2 AutoModel/pipeline output corruption versus Gemma4 embedding-resize propagation.", "right": "issue:45276"}, {"accept": false, "left": "issue:43673", "reason": "Both are v5 regression reports, but one concerns chunked prefill cache handling and the other trust_remote_code breakage from removing `is_torch_fx_available`; not the same bug.", "right": "issue:44561"}, {"accept": false, "left": "issue:44242", "reason": "MoE router load-balancing loss omission and LwDetrImageLoss AMP crash are unrelated training/runtime failures.", "right": "issue:44857"}, {"accept": false, "left": "issue:44291", "reason": "`init_empty_weights` argument handling 
and Qwen3.5 chat crashing are different failure modes with different targets and code paths.", "right": "issue:45362"}], "summary": "These items do not form a single duplicate cluster; they span unrelated issues across model-specific import errors, generation/cache behavior, image processors, logging, and core utilities. All soft-similarity pairs are false positives based on shared Transformers vocabulary rather than the same underlying bug."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:36246|issue:44351", "issue:43531|issue:45362", "issue:44295|issue:45397", "issue:43526|issue:45003", "issue:44336|issue:44485", "issue:43994|issue:45276", "issue:43673|issue:44561", "issue:44242|issue:44857", "issue:44291|issue:45362"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6934, "estimated_input_tokens": 3339, "item_count": 18, "node_count": 18, "serialized_chars": 13354, "soft_pair_count": 10}, "cached_at": "2026-04-14T12:58:21Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "f202d36c46db8945d963cb317c7703bfe7a10c12fc55152c5fa99589e4596f8d", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:36246", "issue:41628", "issue:41950", "issue:42886", "issue:43066", "issue:43441", "issue:43540", "issue:43596", "issue:44162", "issue:44246", "issue:44779", "issue:44871", "issue:44877", "issue:44995", "issue:45200", "issue:45229", "issue:45292", "issue:45375"], "result": {"analyst_result": {"best_issue_reason": "44779 is the best single representative by specificity and impact signal, but only as a stand-in \u2014 the other issues concern different code paths and should not be merged into 
it.", "best_pr_reason": null, "canonical_issue_reason": "Issue 44779 is the most concrete, regression-style report with the strongest discussion/inbound-reference signal, but it does not actually unify the rest of the set; this cluster is not a true duplicate cluster.", "canonical_pr_reason": null, "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "issue:43066", "reason": "Both are tokenizer regressions in v5, but one is a wrong decoder-type issue and the other is Deepseek-specific incorrect output; different underlying bugs and model code paths.", "right": "issue:44779"}, {"accept": false, "left": "issue:44162", "reason": "ESM2 breakage and Gemma 4 mm_token_type_ids handling are unrelated model-specific failures.", "right": "issue:45200"}, {"accept": false, "left": "issue:36246", "reason": "Qwen2_5_VLImageProcessor import failure and a BertModel/DeepSpeed ZeRO-3 IndexError are completely different failure modes.", "right": "issue:43596"}, {"accept": false, "left": "issue:44877", "reason": "Both involve strict config behavior, but one is loading granite_speech config and the other is a missing Qwen3_5MoeVisionConfig field; same mechanism, different concrete bug.", "right": "issue:45375"}, {"accept": false, "left": "issue:42886", "reason": "Offline tokenizer cache loading and Gemma4 multi-GPU CUDA OOM are unrelated.", "right": "issue:45229"}, {"accept": false, "left": "issue:43441", "reason": "Ministral FlashAttention failure and Qwen3OmniMoe video-processing ValueError affect different models and different code paths.", "right": "issue:43540"}, {"accept": false, "left": "issue:41628", "reason": "Both mention image processors, but one is an import error and the other is a video-classification pipeline mis-resolving processor type; not the same bug.", "right": "issue:41950"}, {"accept": false, "left": "issue:44995", "reason": "A stale indexer cache crash on second forward pass is unrelated to Gemma 4 token-type defaults for text-only fine-tuning.", 
"right": "issue:45200"}, {"accept": false, "left": "issue:44246", "reason": "Slow import timing is unrelated to Gemma-3 eos_token_id configuration mismatch.", "right": "issue:44871"}, {"accept": false, "left": "issue:44779", "reason": "Deepseek tokenizer regression and resize_token_embeddings not updating output embeddings are different token/model weight update bugs.", "right": "issue:45292"}], "summary": "The cluster is heterogeneous: all proposed soft pairs are false positives, mostly sharing only broad subsystem vocabulary (tokenizers, configs, vision/video, or import paths) rather than the same concrete bug. No true duplicate group stands out."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43066|issue:44779", "issue:44162|issue:45200", "issue:36246|issue:43596", "issue:44877|issue:45375", "issue:42886|issue:45229", "issue:43441|issue:43540", "issue:41628|issue:41950", "issue:44995|issue:45200", "issue:44246|issue:44871", "issue:44779|issue:45292"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6558, "estimated_input_tokens": 3151, "item_count": 17, "node_count": 17, "serialized_chars": 12604, "soft_pair_count": 9}, "cached_at": "2026-04-14T12:58:37Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "4e4235c5dddbaccfef484daf3ae3eb4c61759d45d0f184f735ab0a80a08809fc", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:43065", "issue:43232", "issue:43257", "issue:43262", "issue:43295", "issue:43408", "issue:43475", "issue:43540", "issue:43901", "issue:44246", "issue:44295", "issue:44336", "issue:44496", "issue:44749", "issue:45003", "issue:45137", "issue:45292"], "result": 
{"analyst_result": {"best_issue_reason": "No single issue is a good global representative, because the set is not a true duplicate cluster and spans unrelated topics.", "best_pr_reason": null, "canonical_issue_reason": "There is no clear canonical issue for a duplicate cluster here: the issues describe distinct bugs in different code paths and products, so they should remain separate.", "canonical_pr_reason": null, "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "issue:43232", "reason": "Different bugs in different areas: generation kwarg syncing vs terminal/ANSI rendering in loading reports.", "right": "issue:44336"}, {"accept": false, "left": "issue:43901", "reason": "Docs mismatch for TextClassificationPipeline is unrelated to unsafe sys.modules access in modeling_utils.", "right": "issue:45003"}, {"accept": false, "left": "issue:44295", "reason": "Reading position_ids after buffer registration is unrelated to the reported slowdown in data filtering after upgrading Transformers.", "right": "issue:44749"}, {"accept": false, "left": "issue:43065", "reason": "SAM3 pixel decoder dummy Conv2d and sam3_video/sam3_tracker warning are different configuration/model-selection issues.", "right": "issue:43408"}, {"accept": false, "left": "issue:43262", "reason": "Audio chat-template sample rate defaulting to 16kHz is unrelated to intermittent import latency.", "right": "issue:44246"}, {"accept": false, "left": "issue:44496", "reason": "Unrecognized model/config loading and unsafe sys.modules access are separate loader issues with different failure modes.", "right": "issue:45003"}, {"accept": false, "left": "issue:43257", "reason": "Qwen3 MoE weight conversion under accelerate+deepspeed and ZeRO3 deque underflow are different DeepSpeed-related bugs.", "right": "issue:45137"}, {"accept": false, "left": "issue:43475", "reason": "Missing fpn_position_embeddings in SAM3 Vision output is unrelated to the Qwen3OmniMoe video input ValueError.", "right": 
"issue:43540"}, {"accept": false, "left": "issue:43295", "reason": "Processor.tokenizer regression and resize_token_embeddings not updating output embeddings are different model/processor API bugs.", "right": "issue:45292"}], "summary": "These items are mostly unrelated issue reports across different subsystems (generation, loading, audio processors, SAM3, DeepSpeed, docs, import latency, etc.). None of the soft pairs look like the same underlying bug or change, so no soft edges should be merged."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43232|issue:44336", "issue:43901|issue:45003", "issue:44295|issue:44749", "issue:43065|issue:43408", "issue:43262|issue:44246", "issue:44496|issue:45003", "issue:43257|issue:45137", "issue:43475|issue:43540", "issue:43295|issue:45292"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7236, "estimated_input_tokens": 3490, "item_count": 18, "node_count": 18, "serialized_chars": 13958, "soft_pair_count": 11}, "cached_at": "2026-04-14T12:58:56Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "9408f24b7011b2bdae0d04b0d0d260648f71a27d907bbb902ac4fa6266f6266a", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:43232", "issue:43441", "issue:43606", "issue:43653", "issue:43673", "issue:43931", "issue:44060", "issue:44077", "issue:44368", "issue:44442", "issue:44466", "issue:44734", "issue:44749", "issue:44869", "issue:44977", "issue:45278", "issue:45335", "issue:45356"], "result": {"analyst_result": {"best_issue_reason": "Issue 43673 is the best stand-alone representative: it describes a concrete, versioned regression in GenerationMixin cache 
handling during chunked prefill, with a clear code path and failure mode.", "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.92, "soft_edge_verdicts": [{"accept": false, "left": "issue:44060", "reason": "Both mention model internals, but one is a tied-weights warning in Qwen3-Next and the other is a post_init policy issue in patchtsmixer; different bugs and fixes.", "right": "issue:44077"}, {"accept": false, "left": "issue:43673", "reason": "Both involve generation/cache flow, but one is a missing cache in v5 chunked_prefill and the other is a KV-cache continuation crash from wrong tensor indexing in serve; not the same defect.", "right": "issue:44734"}, {"accept": false, "left": "issue:44869", "reason": "Different models and failure modes: Whisper timestamp decoding crash vs Kimi-K2.5 tokenizer codec handling/warning behavior.", "right": "issue:45356"}, {"accept": false, "left": "issue:44368", "reason": "Both are Qwen3.5-related, but one is a tied-embeddings warning during LoRA fine-tuning and the other is a flash-attention generation failure; not mergeable as one bug.", "right": "issue:44977"}, {"accept": false, "left": "issue:44466", "reason": "Both touch embeddings/tied weights, but one is serialization of lm_head.weight depending on device and the other is resize_token_embeddings not updating decoder.embed_tokens; distinct code paths.", "right": "issue:45335"}, {"accept": false, "left": "issue:43653", "reason": "BigBirdTokenizer special-token registration causing empty decode output is unrelated to the Chinese report about slower filtering after upgrade.", "right": "issue:44749"}, {"accept": false, "left": "issue:44442", "reason": "AutoTokenizer loading failure for FastSpeech2ConformerTokenizer and the upgrade-related filtering slowdown are different problems in different areas.", "right": "issue:44749"}, {"accept": false, "left": "issue:43232", "reason": "Both are generation-related, but one is about 
update_model_kwargs_for_generation after sync_gpus and the other is cache absence during chunked prefill; separate issues.", "right": "issue:43673"}, {"accept": false, "left": "issue:43931", "reason": "Qwen3-VL weight-shape mismatch on load is unrelated to a tie_word_embeddings warning during ms-swift LoRA fine-tuning.", "right": "issue:44368"}, {"accept": false, "left": "issue:43441", "reason": "FlashAttention failure in Ministral-3 and CPU offload device mismatch in bark-small are different runtime regressions with different triggers.", "right": "issue:43606"}, {"accept": false, "left": "issue:43673", "reason": "GenerationMixin cache regression and broad import errors after upgrading to 5.5.0 are not the same bug.", "right": "issue:45278"}], "summary": "This set is mostly a loose thematic cluster of unrelated issues across generation, tokenizers, model loading, tied weights, and runtime regressions. The soft-similarity pairs share vocabulary or subsystem names, but none look like the same underlying bug or change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44060|issue:44077", "issue:43673|issue:44734", "issue:44869|issue:45356", "issue:44368|issue:44977", "issue:44466|issue:45335", "issue:43653|issue:44749", "issue:44442|issue:44749", "issue:43232|issue:43673", "issue:43931|issue:44368", "issue:43441|issue:43606", "issue:43673|issue:45278"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6954, "estimated_input_tokens": 3349, "item_count": 18, "node_count": 18, "serialized_chars": 13393, "soft_pair_count": 10}, "cached_at": "2026-04-14T12:59:18Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", 
"prepared_review_unit_hash": "12d8d4a6e388a73d3ea905c95e5f968a05bc2e1b240214b520af364dfa61edae", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:28282", "issue:30990", "issue:36246", "issue:42371", "issue:42673", "issue:42898", "issue:43381", "issue:43452", "issue:43646", "issue:43704", "issue:43824", "issue:44162", "issue:44297", "issue:44623", "issue:45003", "issue:45092", "issue:45292", "issue:45335"], "result": {"analyst_result": {"best_issue_reason": "Issue 42673 is the best overall issue candidate because it is an exact duplicate target for 43704 and describes a specific, mergeable bug report. The Qwen2.5 VL import issues are also close, but less exact.", "best_pr_reason": null, "canonical_issue_reason": "Issue 42673 is the clearest canonical issue because it has the exact duplicate title and the most direct, concrete symptom for the Qwen3 VRAM leak. Issue 36246 is the other strong duplicate-family candidate, but 42673 is the cleanest anchor.", "canonical_pr_reason": null, "confidence": 0.86, "soft_edge_verdicts": [{"accept": false, "left": "issue:43646", "reason": "Both involve model-loading / v5-era breakage, but they describe different concrete failures and likely different code paths.", "right": "issue:44162"}, {"accept": true, "left": "issue:42673", "reason": "Same exact title and same reported bug: Qwen3ForCausalLM VRAM leak when used in multiple dataloader threads.", "right": "issue:43704"}, {"accept": false, "left": "issue:44297", "reason": "Both are save_pretrained regressions, but one is about tokenizer_class metadata and the other about processor files; not the same underlying bug.", "right": "issue:44623"}, {"accept": false, "left": "issue:28282", "reason": "ImportError for missing PyTorch vs sentence-transformers getting stuck loading are different symptoms and likely different root causes.", "right": "issue:30990"}, {"accept": false, "left": "issue:43381", "reason": "Gradient checkpointing in eval mode and resize_token_embeddings 
not updating output embeddings are separate behaviors.", "right": "issue:45292"}, {"accept": false, "left": "issue:42898", "reason": "clean_up_tokenization_spaces v5 behavior change is unrelated to the ESM2 breakage report.", "right": "issue:44162"}, {"accept": true, "left": "issue:36246", "reason": "Both are import-name failures in the Qwen2.5 VL namespace and plausibly stem from the same missing public export/module init issue.", "right": "issue:43824"}, {"accept": false, "left": "issue:43381", "reason": "These concern different embedding/checkpointing behaviors and do not appear to be the same bug.", "right": "issue:45335"}, {"accept": false, "left": "issue:42371", "reason": "TF32 API guidance and InternVL2 meta-initialization incompatibility are distinct issues.", "right": "issue:45092"}, {"accept": false, "left": "issue:43452", "reason": "gguf_file from_pretrained breakage and unsafe sys.modules access are different bugs, even if both touch loading internals.", "right": "issue:45003"}], "summary": "This set is mostly heterogeneous issue reports, with two clear duplicate-like pairs: the repeated Qwen3 VRAM leak report and the Qwen2.5 VL import/export failures. 
The rest are related only at a broad subsystem level, not the same concrete bug."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43646|issue:44162", "issue:42673|issue:43704", "issue:44297|issue:44623", "issue:28282|issue:30990", "issue:43381|issue:45292", "issue:42898|issue:44162", "issue:36246|issue:43824", "issue:43381|issue:45335", "issue:42371|issue:45092", "issue:43452|issue:45003"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6498, "estimated_input_tokens": 3121, "item_count": 17, "node_count": 17, "serialized_chars": 12481, "soft_pair_count": 9}, "cached_at": "2026-04-14T12:59:43Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "30b7b660df7e871ac808fa4974980768396ebc00744adb0227d15082b9339f9b", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:41628", "issue:42222", "issue:42994", "issue:43475", "issue:43525", "issue:43643", "issue:43873", "issue:44038", "issue:44623", "issue:44933", "issue:44945", "issue:45081", "issue:45103", "issue:45200", "issue:45310", "issue:45375", "issue:45381"], "result": {"analyst_result": {"best_issue_reason": "43643 is the best umbrella issue because it is general, clearly describes a concrete field-loss problem, and is more likely than the model-specific reports to represent a shared root cause category.", "best_pr_reason": null, "canonical_issue_reason": "Issue 43643 is the broadest and most reusable bug report in the set: it describes missing fields after `AutoConfig.from_pretrained(..., trust_remote_code=True)`, which matches the common failure pattern behind several other config-related reports.", "canonical_pr_reason": null, 
"confidence": 0.42, "soft_edge_verdicts": [{"accept": false, "left": "issue:43475", "reason": "Both are video/vision-position bugs, but they affect different models and different attributes (`fpn_position_embeddings` vs `vision_position_ids`), so this does not look like the same concrete defect.", "right": "issue:45381"}, {"accept": false, "left": "issue:41628", "reason": "Both involve import exposure, but one is a top-level `AutoImageProcessor` import failure and the other is a missing import from `image_utils`; too little evidence they share one code path.", "right": "issue:44933"}, {"accept": false, "left": "issue:45081", "reason": "Tokenizer regex loading and auto-docstring annotation handling are unrelated subsystems with different failure modes.", "right": "issue:45103"}, {"accept": false, "left": "issue:42222", "reason": "These are model-specific config/shape issues for different architectures (`vitpose` vs `Gemma 4`), not the same underlying bug.", "right": "issue:45200"}, {"accept": false, "left": "issue:43475", "reason": "Both mention missing fields on returned objects, but one is SAM 3 Video internals and the other is generic `AutoConfig` remote-code loading; the concrete code paths differ.", "right": "issue:43643"}, {"accept": false, "left": "issue:42994", "reason": "Quantized model saving and missing `pad_token_id` on `Llama4Config` are different problems; no clear shared fix.", "right": "issue:43525"}, {"accept": false, "left": "issue:45310", "reason": "They are related only by Qwen3.5/MoE naming; one is a generic `from_pretrained` failure, the other is a specific missing vision-config field. 
Not enough to treat as one bug.", "right": "issue:45375"}, {"accept": false, "left": "issue:44038", "reason": "Qwen3-VL-Moe behavior under transformers 5.0 and missing processor save files are separate issues affecting different layers.", "right": "issue:44623"}, {"accept": false, "left": "issue:43873", "reason": "Quantization offloading and pipeline-parallel LLM output correctness are unrelated defects.", "right": "issue:44945"}], "summary": "This cluster is mostly a loose similarity set of model/config/import regressions, not a tight duplicate group. The only plausible unifying theme is config-field loss during loading/saving, but even there the affected models and code paths differ too much to merge confidently."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43475|issue:45381", "issue:41628|issue:44933", "issue:45081|issue:45103", "issue:42222|issue:45200", "issue:43475|issue:43643", "issue:42994|issue:43525", "issue:45310|issue:45375", "issue:44038|issue:44623", "issue:43873|issue:44945"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7096, "estimated_input_tokens": 3420, "item_count": 18, "node_count": 18, "serialized_chars": 13677, "soft_pair_count": 12}, "cached_at": "2026-04-14T13:00:03Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "33ade7736bb8725da86035a2e510ad409c73b72a0618b7566e424c749e1765d4", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:36010", "issue:41628", "issue:42831", "issue:42994", "issue:43232", "issue:43475", "issue:43525", "issue:43673", "issue:44164", "issue:44188", "issue:44222", "issue:44351", "issue:44661", "issue:45230", "issue:45310", 
"issue:45362", "issue:45375", "issue:45397"], "result": {"analyst_result": {"best_issue_reason": "Issue 44164 is the clearest, most actionable report: it describes a specific save/from_pretrained serialization failure and is more well-scoped than the others.", "best_pr_reason": null, "canonical_issue_reason": "No single canonical issue stands out: the items span multiple unrelated failure modes and models, and the candidate pairings are only broad thematic overlaps rather than true duplicates.", "canonical_pr_reason": null, "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "issue:45230", "reason": "45230 is an uninformative generic bug report; 45397 is a specific Gemma-4 ZeRO3 from_pretrained issue. Too little evidence they are the same bug.", "right": "issue:45397"}, {"accept": false, "left": "issue:43475", "reason": "Different problem domains: SAM3 video attribute access vs add-new-model-like failing on TOKENIZER_MAPPING_NAMES.", "right": "issue:44661"}, {"accept": false, "left": "issue:45362", "reason": "Both involve Qwen3.5, but one is a chat crash and the other is a config-field omission; distinct failures.", "right": "issue:45375"}, {"accept": false, "left": "issue:43232", "reason": "Both are generation-related, but one is a sync_gpus/_update_model_kwargs bug and the other is attention-kernel divergence under torch.compile.", "right": "issue:44188"}, {"accept": false, "left": "issue:41628", "reason": "Both are import errors, but for different symbols and likely different export regressions; not the same concrete bug.", "right": "issue:44351"}, {"accept": false, "left": "issue:44222", "reason": "One is FP8 save_pretrained for MoE, the other is a Qwen3.5 MoE from_pretrained error; same broad area, different operation and symptom.", "right": "issue:45310"}, {"accept": false, "left": "issue:42831", "reason": "Accuracy regression in FineGrainedFP8 vs a MoE save_pretrained failure; not the same underlying issue.", "right": "issue:44222"}, 
{"accept": false, "left": "issue:42994", "reason": "Both concern saving/loading, but one is quantized model saving and the other is extra_state handling; too different to merge.", "right": "issue:44164"}, {"accept": false, "left": "issue:36010", "reason": "Both are import errors from transformers, but they concern different missing exports and likely separate fixes.", "right": "issue:41628"}, {"accept": false, "left": "issue:43525", "reason": "Different model configs and missing fields: Llama4 pad_token_id vs Qwen3_5MoeVisionConfig deepstack_visual_indexes.", "right": "issue:45375"}, {"accept": false, "left": "issue:43673", "reason": "GenerationMixin cache missing during chunked_prefill is unrelated to attention-kernel divergence under torch.compile.", "right": "issue:44188"}, {"accept": false, "left": "issue:45310", "reason": "Both involve Qwen3.5/Gemma-style pretrained flows, but the reported failures are different and not plausibly one merged fix.", "right": "issue:45397"}], "summary": "This cluster is a mixed set of unrelated Transformers issues. 
Several share broad themes (imports, generation, save/from_pretrained, Qwen/MoE), but none of the soft pairs look like the same concrete bug or change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:45230|issue:45397", "issue:43475|issue:44661", "issue:45362|issue:45375", "issue:43232|issue:44188", "issue:41628|issue:44351", "issue:44222|issue:45310", "issue:42831|issue:44222", "issue:42994|issue:44164", "issue:36010|issue:41628", "issue:43525|issue:45375", "issue:43673|issue:44188", "issue:45310|issue:45397"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6966, "estimated_input_tokens": 3355, "item_count": 18, "node_count": 18, "serialized_chars": 13417, "soft_pair_count": 10}, "cached_at": "2026-04-14T13:00:20Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "59f105b82dc3688a26a4270109ac761060082244619f3240a2854fe0e27f592f", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:30990", "issue:35141", "issue:42175", "issue:42898", "issue:43232", "issue:43377", "issue:43479", "issue:43784", "issue:43824", "issue:43976", "issue:44038", "issue:44079", "issue:44242", "issue:44568", "issue:44792", "issue:44869", "issue:44933", "issue:45310"], "result": {"analyst_result": {"best_issue_reason": "issue:44242 is the best overall representative only in the sense of signal quality and specificity; it looks like a concrete, well-scoped bug with clear impact.", "best_pr_reason": null, "canonical_issue_reason": "issue:44242 is the strongest standalone bug report in the set: it has the most inbound references, substantial discussion, and a precise, actionable defect description.", 
"canonical_pr_reason": null, "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "issue:30990", "reason": "Different problems: one is Sentence Transformers hanging on load, the other is token embedding resizing reinitializing outputs in post_init.", "right": "issue:35141"}, {"accept": false, "left": "issue:43232", "reason": "Different code paths and symptoms: generation kwargs syncing vs a Janus image-generation test failure.", "right": "issue:44792"}, {"accept": false, "left": "issue:43479", "reason": "Unrelated bugs: multimodal config default initialization vs ModelOutput key assignment when a prior value was None.", "right": "issue:44079"}, {"accept": false, "left": "issue:43377", "reason": "Both are model behavior bugs, but they affect different mechanisms: MIMI padding-mask batching vs MoE load-balancing loss computation.", "right": "issue:44242"}, {"accept": false, "left": "issue:44038", "reason": "Same broad Qwen family, but not the same concrete bug: Qwen3-VL-Moe vs Qwen3.5 Moe from_pretrained error.", "right": "issue:45310"}, {"accept": false, "left": "issue:43479", "reason": "Different areas entirely: config initialization defaults vs Whisper word-timestamp decoding crash.", "right": "issue:44869"}, {"accept": false, "left": "issue:42175", "reason": "No shared underlying defect: pip extras/backend dependency issue vs a missing import from image_utils.", "right": "issue:44933"}, {"accept": false, "left": "issue:42898", "reason": "Both relate to tokenizer behavior in v5, but they are distinct regressions affecting different APIs and models.", "right": "issue:44568"}, {"accept": false, "left": "issue:43976", "reason": "Completely unrelated: Python version compatibility for transformers vs a missing import from image_utils.", "right": "issue:44933"}, {"accept": false, "left": "issue:43784", "reason": "Both are import-time failures, but they involve different missing symbols and different packages/models.", "right": "issue:43824"}], "summary": 
"No duplicate clusters here: all proposed soft pairs are semantically unrelated or only loosely related by subsystem/version. I would keep them separate; there is no PR evidence to merge."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:30990|issue:35141", "issue:43232|issue:44792", "issue:43479|issue:44079", "issue:43377|issue:44242", "issue:44038|issue:45310", "issue:43479|issue:44869", "issue:42175|issue:44933", "issue:42898|issue:44568", "issue:43976|issue:44933", "issue:43784|issue:43824"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7026, "estimated_input_tokens": 3385, "item_count": 18, "node_count": 18, "serialized_chars": 13539, "soft_pair_count": 10}, "cached_at": "2026-04-14T13:00:38Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "23566a1f9defacab48de8ecdc6b5f66a8b235b55883b6219bb7c29de806f140e", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:42898", "issue:43278", "issue:43723", "issue:43874", "issue:44060", "issue:44188", "issue:44373", "issue:44568", "issue:44623", "issue:44734", "issue:44779", "issue:44964", "issue:45092", "issue:45103", "issue:45125", "issue:45137", "issue:45310", "issue:45356"], "result": {"analyst_result": {"best_issue_reason": "issue:44188 is the most actionable representative issue to surface: it has a clear runtime bug, strong discussion activity, and is still open.", "best_pr_reason": "No PRs are present in this cluster.", "canonical_issue_reason": "issue:44188 is the best available anchor because it is open, concrete, and actively discussed; however, the set is not a real duplicate cluster.", "canonical_pr_reason": "No PRs are 
present in this cluster.", "confidence": 0.95, "soft_edge_verdicts": [{"accept": false, "left": "issue:44188", "reason": "Different problems: attention-kernel divergence under torch.compile vs a wrong docstring for position_ids.", "right": "issue:44373"}, {"accept": false, "left": "issue:44060", "reason": "Both involve Qwen models, but the bugs are different: tied-weights warning misbinding vs missing _tp_plan for tensor parallelism.", "right": "issue:45125"}, {"accept": false, "left": "issue:44964", "reason": "Unrelated failures: loading Phi-4 multimodal checkpoints vs DeepSpeed ZeRO3 deque underflow.", "right": "issue:45137"}, {"accept": false, "left": "issue:44734", "reason": "Serving KV-cache indexing crash is unrelated to the auto_docstring AttributeError from future annotations.", "right": "issue:45103"}, {"accept": false, "left": "issue:43723", "reason": "Both are tokenizer-related, but one is AutoTokenizer loading in v5 and the other is add_special_tokens missing BOS/EOS for a specific tokenizer.", "right": "issue:44568"}, {"accept": false, "left": "issue:42898", "reason": "Both concern tokenizer behavior changes, but they affect different tokenizers with different regressions and symptoms.", "right": "issue:45356"}, {"accept": false, "left": "issue:44623", "reason": "Processor save_pretrained missing files is not the same bug as Qwen3.5 Moe from_pretrained failing.", "right": "issue:45310"}, {"accept": false, "left": "issue:43278", "reason": "Embedding dtype drift in evaluate has no connection to the ZeRO3 empty-deque error.", "right": "issue:45137"}, {"accept": false, "left": "issue:43874", "reason": "Both are multimodal/model-loading issues, but one is a missing image-patch method and the other is remote-code incompatibility with meta init.", "right": "issue:45092"}, {"accept": false, "left": "issue:42898", "reason": "Different tokenizer regressions: clean_up_tokenization_spaces behavior vs Deepseek tokenizer output correctness.", "right": 
"issue:44779"}], "summary": "This cluster is heterogeneous: it mixes unrelated tokenizer regressions, model-loading failures, serving bugs, and documentation/typing issues. None of the soft pairs look like true duplicates of the same underlying bug or change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44188|issue:44373", "issue:44060|issue:45125", "issue:44964|issue:45137", "issue:44734|issue:45103", "issue:43723|issue:44568", "issue:42898|issue:45356", "issue:44623|issue:45310", "issue:43278|issue:45137", "issue:43874|issue:45092", "issue:42898|issue:44779"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6960, "estimated_input_tokens": 3352, "item_count": 18, "node_count": 18, "serialized_chars": 13406, "soft_pair_count": 10}, "cached_at": "2026-04-14T13:00:59Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "a6aa03543cdf634066192762922f81fbd6bfd57290312e469a525782d7792972", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:42898", "issue:43295", "issue:43479", "issue:43644", "issue:44077", "issue:44206", "issue:44222", "issue:44242", "issue:44263", "issue:44297", "issue:44360", "issue:44448", "issue:44483", "issue:44484", "issue:45103", "issue:45137", "issue:45276", "issue:45406"], "result": {"analyst_result": {"best_issue_reason": "44448 is the best single issue to represent the cluster\u2019s overall theme of v5 regressions, even though it is not a duplicate of the others.", "best_pr_reason": null, "canonical_issue_reason": "Issue 44448 is the broadest user-facing regression report here: it frames an explicit v4-vs-v5 parity break, which is more representative of 
the cluster than the narrower subsystem-specific bugs.", "canonical_pr_reason": null, "confidence": 0.22, "soft_edge_verdicts": [{"accept": false, "left": "issue:44077", "reason": "Both are v5-era model behavior issues, but one is about post_init support and the other about missing MoE load-balancing loss; different code paths and fixes.", "right": "issue:44242"}, {"accept": false, "left": "issue:42898", "reason": "Both mention v5 behavior changes, but one is about tokenization cleanup semantics and the other is a model output parity regression; not the same bug.", "right": "issue:44448"}, {"accept": false, "left": "issue:45137", "reason": "DeepSpeed ZeRO3 deque failure and Gemma4 embedding resize behavior are unrelated failures in different parts of the stack.", "right": "issue:45276"}, {"accept": false, "left": "issue:42898", "reason": "Tokenizer cleanup behavior and processor.tokenizer/image-passing regression are distinct API breakages, not one underlying defect.", "right": "issue:43295"}, {"accept": false, "left": "issue:43479", "reason": "Default multimodal config initialization and non-persistent buffer initialization are separate initialization bugs with different affected objects.", "right": "issue:43644"}, {"accept": false, "left": "issue:44206", "reason": "Feature extractor unsupported-arg crash and auto_docstring AttributeError are unrelated regressions.", "right": "issue:45103"}, {"accept": false, "left": "issue:44222", "reason": "FP8 save_pretrained on MoE models and tokenizer_class mismatch in tokenizer_config.json affect different save paths.", "right": "issue:44297"}, {"accept": false, "left": "issue:44483", "reason": "Both are serving/API-related, but one is a /v1/chat/completions request rejection and the other is a Gemma4Processor attribute error; not the same issue.", "right": "issue:45406"}, {"accept": false, "left": "issue:44222", "reason": "MoE FP8 save_pretrained and the 50GB max_shard_size question are unrelated save_pretrained concerns.", 
"right": "issue:44484"}, {"accept": false, "left": "issue:44263", "reason": "These are both DSA-indexer related, but one concerns torch.split return handling and the other a missing ReLU; different concrete code-path problems.", "right": "issue:44360"}], "summary": "This cluster is a loose collection of Transformers v5-era regression/bug reports across different subsystems (tokenization, processors, multimodal configs, MoE, serving, DeepSpeed, etc.). The soft-similarity links are thematic rather than true duplicates, so none should be merged."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44077|issue:44242", "issue:42898|issue:44448", "issue:45137|issue:45276", "issue:42898|issue:43295", "issue:43479|issue:43644", "issue:44206|issue:45103", "issue:44222|issue:44297", "issue:44483|issue:45406", "issue:44222|issue:44484", "issue:44263|issue:44360"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7110, "estimated_input_tokens": 3427, "item_count": 18, "node_count": 18, "serialized_chars": 13706, "soft_pair_count": 11}, "cached_at": "2026-04-14T13:01:18Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "28f03829f80551cdb5fb45dfc563e0f1639c72ecfcec72284cf4274ed415d73c", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:22355", "issue:28282", "issue:42831", "issue:43479", "issue:43606", "issue:43723", "issue:44222", "issue:44261", "issue:44263", "issue:44297", "issue:44514", "issue:44623", "issue:44871", "issue:44913", "issue:44977", "issue:45092", "issue:45103", "issue:45310"], "result": {"analyst_result": {"best_issue_reason": "No good global representative exists. 
If forced, 44623 is the broadest serialization-related issue, but it still does not cover most of the other items.", "best_pr_reason": null, "canonical_issue_reason": "No single canonical issue fits this set; the issues span distinct subsystems and failure modes, so choosing one would misrepresent the cluster.", "canonical_pr_reason": null, "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "issue:42831", "reason": "FineGrainedFP8 accuracy regression is unrelated to processor.save_pretrained missing files.", "right": "issue:44623"}, {"accept": false, "left": "issue:44222", "reason": "Both mention save_pretrained, but one is FP8/MoE weight saving and the other is missing processor files; different bugs and code paths.", "right": "issue:44623"}, {"accept": false, "left": "issue:43723", "reason": "Tokenizer loading failure in v5 and Qwen3.5 flash-attention generation failure are separate problems.", "right": "issue:44977"}, {"accept": false, "left": "issue:22355", "reason": "Both are import/dependency errors, but one is missing transformers.onnx and the other is missing PyTorch for AutoModel; not the same bug.", "right": "issue:28282"}, {"accept": false, "left": "issue:44263", "reason": "GlmMoeDsaIndexer torch.split return-value issue is not the same as InternVL2 v5 meta-initialization incompatibility.", "right": "issue:45092"}, {"accept": false, "left": "issue:44514", "reason": "A batched apply_chat_template crash and an auto_docstring AttributeError under future annotations are unrelated failures.", "right": "issue:45103"}, {"accept": false, "left": "issue:44297", "reason": "Tokenizer save_pretrained metadata mismatch is different from a Qwen3.5 MoE from_pretrained load error.", "right": "issue:45310"}, {"accept": false, "left": "issue:43606", "reason": "CPU offload device mismatch for suno/bark-small is unrelated to missing processor save_pretrained files.", "right": "issue:44623"}, {"accept": false, "left": "issue:43479", "reason": "Both involve 
config persistence, but Phi4MultimodalConfig None-handling and GPTNeoXConfig rotary_pct reload are different fields and bugs.", "right": "issue:44913"}, {"accept": false, "left": "issue:43723", "reason": "Tokenizer loading failure and Gemma-3 eos_token_id inconsistency are distinct issues.", "right": "issue:44871"}, {"accept": false, "left": "issue:44261", "reason": "MLA rms_norm_eps precision bug is not the same underlying problem as remote-code meta-init incompatibility.", "right": "issue:45092"}], "summary": "The cluster is highly heterogeneous: the items are mostly unrelated bugs that only share broad themes like loading, configuration, or save/pretrained behavior. None of the proposed soft edges look like true duplicates or the same fixable code-path issue."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:42831|issue:44623", "issue:44222|issue:44623", "issue:43723|issue:44977", "issue:22355|issue:28282", "issue:44263|issue:45092", "issue:44514|issue:45103", "issue:44297|issue:45310", "issue:43606|issue:44623", "issue:43479|issue:44913", "issue:43723|issue:44871", "issue:44261|issue:45092"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6410, "estimated_input_tokens": 3077, "item_count": 17, "node_count": 17, "serialized_chars": 12308, "soft_pair_count": 9}, "cached_at": "2026-04-14T13:01:35Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "fcffae4a3f113caa2f15b9b2b6050d259b23590a0d53124a118fc6120c73d369", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:30990", "issue:38175", "issue:41628", "issue:42831", "issue:42886", "issue:42994", "issue:43723", "issue:43824", 
"issue:44295", "issue:44373", "issue:44442", "issue:44485", "issue:44623", "issue:44991", "issue:45092", "issue:45310", "issue:45357"], "result": {"analyst_result": {"best_issue_reason": "issue:43723 is the best representative for the tokenizer-loading subset, but the overall cluster is too mixed for a single issue to represent everything well.", "best_pr_reason": null, "canonical_issue_reason": "issue:43723 is the broadest and most central tokenizer-loading regression among the items, but it still only covers one subtheme and does not subsume the rest of the cluster.", "canonical_pr_reason": null, "confidence": 0.87, "soft_edge_verdicts": [{"accept": false, "left": "issue:44373", "reason": "Docstring mismatch vs RoPE implementation discussion; different bug types and no shared failing code path.", "right": "issue:44485"}, {"accept": false, "left": "issue:43723", "reason": "Both are tokenizer-loading failures, but they affect different models and likely different failure modes.", "right": "issue:44991"}, {"accept": false, "left": "issue:45092", "reason": "Different model families and different problems: meta-init incompatibility vs incorrect save_pretrained keys.", "right": "issue:45357"}, {"accept": false, "left": "issue:42886", "reason": "Offline tokenizer cache loading and quantized model saving are unrelated subsystems.", "right": "issue:42994"}, {"accept": false, "left": "issue:43723", "reason": "Generic AutoTokenizer regression vs FastSpeech2ConformerTokenizer-specific loading failure; too different to merge as one bug.", "right": "issue:44442"}, {"accept": false, "left": "issue:30990", "reason": "Sentence Transformers loading hang and zero probabilities in SigLIP2 are unrelated symptoms.", "right": "issue:38175"}, {"accept": false, "left": "issue:44295", "reason": "Position_ids buffer access error and missing processor save files are different code paths.", "right": "issue:44623"}, {"accept": false, "left": "issue:41628", "reason": "Both are import errors, 
but for different symbols/models and likely separate export issues.", "right": "issue:43824"}, {"accept": false, "left": "issue:42831", "reason": "FineGrainedFP8 accuracy degradation and Qwen3.5 MoE from_pretrained failure are not the same underlying bug.", "right": "issue:45310"}], "summary": "The set is heterogeneous, with only a loose recurring theme around tokenizer-loading regressions. None of the soft pairs look like true duplicates or the same fixable code path, so all soft edges should be rejected."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44373|issue:44485", "issue:43723|issue:44991", "issue:45092|issue:45357", "issue:42886|issue:42994", "issue:43723|issue:44442", "issue:30990|issue:38175", "issue:44295|issue:44623", "issue:41628|issue:43824", "issue:42831|issue:45310"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6818, "estimated_input_tokens": 3281, "item_count": 18, "node_count": 18, "serialized_chars": 13122, "soft_pair_count": 10}, "cached_at": "2026-04-14T13:01:54Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "f96f9285c563693295696f33a4f9a30accb332b9d63b52172fa557cc4b573d51", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:30990", "issue:33357", "issue:42371", "issue:42831", "issue:42886", "issue:42907", "issue:43066", "issue:43122", "issue:43576", "issue:43784", "issue:44038", "issue:44117", "issue:44263", "issue:44373", "issue:44514", "issue:44521", "issue:44704", "issue:44945"], "result": {"analyst_result": {"best_issue_reason": "Issue 44514 is the best standalone issue to keep because it is precise, reproducible, and tied to a specific code 
path (`apply_chat_template` on batched input with `padding=False`).", "best_pr_reason": null, "canonical_issue_reason": "Issue 44514 is the clearest, most actionable bug report in the set: it names a specific API, a concrete crash condition, and a reproducible input shape. If a single issue must represent this set, it is the strongest candidate.", "canonical_pr_reason": null, "confidence": 0.93, "soft_edge_verdicts": [{"accept": false, "left": "issue:43576", "reason": "Different failures: CLI `transformers env` breakage vs import-time `NameError` in sentence-transformers integration. Same ecosystem, not the same bug.", "right": "issue:43784"}, {"accept": false, "left": "issue:44263", "reason": "One is a runtime tensor-splitting/model issue; the other is a docstring problem. No shared underlying code defect.", "right": "issue:44373"}, {"accept": false, "left": "issue:43122", "reason": "Tokenization output drift and multimodal assistant-mask corruption are distinct symptoms in different processor paths.", "right": "issue:44521"}, {"accept": false, "left": "issue:42831", "reason": "FP8 accuracy regression and a Qwen3-VL-Moe bug are unrelated concrete issues; too broad to be duplicates.", "right": "issue:44038"}, {"accept": false, "left": "issue:42886", "reason": "Offline cache loading failure and wrong tokenizer decoder type are separate tokenizer-related bugs with different code paths.", "right": "issue:43066"}, {"accept": false, "left": "issue:44117", "reason": "`TOKENIZER_MAPPING_NAMES` returning `None` and `AutoProcessor.from_pretrained` dropping kwargs to `cached_file` are different loader defects.", "right": "issue:44704"}, {"accept": false, "left": "issue:30990", "reason": "Sentence Transformers hanging on load vs MacOS bus error for a community CLIP model are distinct platform/model-specific failures.", "right": "issue:33357"}, {"accept": false, "left": "issue:43122", "reason": "Tokenizer output changed across versions, while this is a batched 
`apply_chat_template` crash. Related area, but not the same bug.", "right": "issue:44514"}, {"accept": false, "left": "issue:42907", "reason": "Saving dequantized models and incorrect output under pipeline parallelism affect different subsystems and failure modes.", "right": "issue:44945"}, {"accept": false, "left": "issue:42371", "reason": "TF32 configuration behavior and `torch.split()` return values in model code are unrelated.", "right": "issue:44263"}], "summary": "The proposed soft pairs are only superficially similar; they span different components, symptoms, and root causes. I would not merge any of them as duplicates. No PRs were provided."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43576|issue:43784", "issue:44263|issue:44373", "issue:43122|issue:44521", "issue:42831|issue:44038", "issue:42886|issue:43066", "issue:44117|issue:44704", "issue:30990|issue:33357", "issue:43122|issue:44514", "issue:42907|issue:44945", "issue:42371|issue:44263"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6474, "estimated_input_tokens": 3109, "item_count": 17, "node_count": 17, "serialized_chars": 12436, "soft_pair_count": 9}, "cached_at": "2026-04-14T13:02:23Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "11c5904541a9fc3053357ba6ff65c02bfe1fd745462649b4257ebd41305b8bb6", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:39401", "issue:41628", "issue:42907", "issue:43232", "issue:43408", "issue:43475", "issue:43937", "issue:43976", "issue:44488", "issue:44625", "issue:44704", "issue:44743", "issue:44829", "issue:44945", "issue:45245", "issue:45278", "issue:45290"], "result": 
{"analyst_result": {"best_issue_reason": "If forced to pick a representative issue, issue:44625 has the strongest cluster signal by discussion/inbound refs, but it still does not represent the rest of the mixed topics well.", "best_pr_reason": null, "canonical_issue_reason": "No single canonical issue stands out because the items are about different code paths and features; the soft-similarity links are all false positives.", "canonical_pr_reason": null, "confidence": 0.95, "soft_edge_verdicts": [{"accept": false, "left": "issue:44704", "reason": "Different bugs: cached_file kwargs forwarding in AutoProcessor vs chat template crashing on tool-call assistant messages.", "right": "issue:45290"}, {"accept": false, "left": "issue:43976", "reason": "Unrelated failure modes: Python-version compatibility regression vs incorrect output under pipeline parallelism.", "right": "issue:44945"}, {"accept": false, "left": "issue:43232", "reason": "Generation sync_gpus/model kwargs issue is unrelated to the 2^24 categories runtime error.", "right": "issue:45245"}, {"accept": false, "left": "issue:43232", "reason": "Both involve generation, but one is sync_gpus kwargs handling and the other is recurrent-state reset in modular_qwen3_5; not the same bug.", "right": "issue:44743"}, {"accept": false, "left": "issue:39401", "reason": "Qwen3 offset_mapping bug and loading cjvt/sleng-bert are different tokenizer/model-loading issues.", "right": "issue:44488"}, {"accept": false, "left": "issue:42907", "reason": "Saving dequantized Ministrals/Devstrals is unrelated to GLM-5 GenerationConfig validation.", "right": "issue:43937"}, {"accept": false, "left": "issue:41628", "reason": "One is a missing AutoImageProcessor import, the other is a broad set of import errors after upgrade; not the same concrete defect.", "right": "issue:45278"}, {"accept": false, "left": "issue:44625", "reason": "Qwen3.5 num_labels propagation and flash_attention_3 degenerate training are separate 
model/config/training issues.", "right": "issue:44829"}, {"accept": false, "left": "issue:43408", "reason": "SAM 3 model-type warning and missing fpn_position_embeddings are distinct SAM 3 problems with different failure points.", "right": "issue:43475"}], "summary": "The cluster is heterogeneous: the soft pairs span unrelated bugs in tokenization, generation, import/export, SAM 3, Qwen, and saving/serialization. None of the candidate pairs look like the same underlying issue, so there is no true duplicate canonical here."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44704|issue:45290", "issue:43976|issue:44945", "issue:43232|issue:45245", "issue:43232|issue:44743", "issue:39401|issue:44488", "issue:42907|issue:43937", "issue:41628|issue:45278", "issue:44625|issue:44829", "issue:43408|issue:43475"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6742, "estimated_input_tokens": 3243, "item_count": 18, "node_count": 18, "serialized_chars": 12969, "soft_pair_count": 9}, "cached_at": "2026-04-14T13:02:37Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "b58cff8d19f6809bd16e4f9b9532471e0751b995ed614cfdc14b37f263967b79", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:29127", "issue:30990", "issue:39401", "issue:41628", "issue:43116", "issue:43122", "issue:43992", "issue:44261", "issue:44295", "issue:44297", "issue:44360", "issue:44373", "issue:44485", "issue:44521", "issue:44704", "issue:44779", "issue:44987", "issue:45200"], "result": {"analyst_result": {"best_issue_reason": "Issue 44779 is the best representative issue because it describes a specific broken behavior, a 
regression window, and a user-visible incorrect result rather than a vague error or discussion.", "best_pr_reason": null, "canonical_issue_reason": "Issue 44779 is the most concrete and directly reproducible bug report in this set: a version-regression in tokenizer output with a clear before/after comparison and an explicit affected model.", "canonical_pr_reason": null, "confidence": 0.77, "soft_edge_verdicts": [{"accept": false, "left": "issue:43116", "reason": "Both involve classification/fine-tuning workflows, but one is an example script returning empty multi-label outputs and the other is a Gemma 4 multimodal token-type-id defaulting issue. Different code paths and symptoms.", "right": "issue:45200"}, {"accept": false, "left": "issue:43122", "reason": "Both mention tokenizer regressions across v4/v5, but 43122 is a broad change in tokenization behavior while 44779 is a DeepSeek-specific incorrect-result report. Too little evidence they are the same underlying bug.", "right": "issue:44779"}, {"accept": false, "left": "issue:44360", "reason": "These are unrelated model-implementation discussions: a DSA indexer ReLU concern versus GLM-5 RoPE implementation. Same general area, but not the same bug.", "right": "issue:44485"}, {"accept": false, "left": "issue:43992", "reason": "One is missing embed_tokens.weight in UMT5Encoder.from_pretrained; the other is AutoProcessor not forwarding kwargs to cached_file. Different functions and failure modes.", "right": "issue:44704"}, {"accept": false, "left": "issue:29127", "reason": "LayoutLMv3 error messaging for boxes and Sentence Transformers loading hang are clearly unrelated issues.", "right": "issue:30990"}, {"accept": false, "left": "issue:44261", "reason": "One reports a precision/config issue in MLA q_a_layernorm, the other a wrong docstring for position_ids. 
Documentation and runtime bug are not the same problem.", "right": "issue:44373"}, {"accept": false, "left": "issue:44295", "reason": "A position_ids buffer access error and a tokenizer_class mismatch in saved config are separate bugs affecting different parts of the stack.", "right": "issue:44297"}, {"accept": false, "left": "issue:39401", "reason": "Wrong offset_mapping for Qwen3 tokenization and zero assistant_masks for multimodal chat templates are different tokenizer/template bugs.", "right": "issue:44521"}, {"accept": false, "left": "issue:41628", "reason": "Importing AutoImageProcessor fails immediately, while loading physical-intelligence/fast under transformers>=5.1.0 is a broader model-loading failure. Similar surface area, not the same concrete issue.", "right": "issue:44987"}], "summary": "Most pairs are only loosely similar by subsystem or symptom; none are strong duplicates. The clearest near-match is the tokenizer regression pair, but it still looks model-specific vs general and should be kept separate."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43116|issue:45200", "issue:43122|issue:44779", "issue:44360|issue:44485", "issue:43992|issue:44704", "issue:29127|issue:30990", "issue:44261|issue:44373", "issue:44295|issue:44297", "issue:39401|issue:44521", "issue:41628|issue:44987"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6856, "estimated_input_tokens": 3300, "item_count": 18, "node_count": 18, "serialized_chars": 13198, "soft_pair_count": 10}, "cached_at": "2026-04-14T13:02:51Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": 
"abccc93470faab51123a9adab6915391c931c43c32a4b0276ef1cbaca014b326", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:36296", "issue:42371", "issue:42831", "issue:42907", "issue:43116", "issue:43122", "issue:43278", "issue:43452", "issue:43550", "issue:43824", "issue:43927", "issue:44038", "issue:44222", "issue:44295", "issue:44297", "issue:44351", "issue:44945", "issue:44977"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "issue:42371", "reason": "TF32 API guidance vs FineGrainedFP8 accuracy; both involve numeric precision, but they are different features and failure modes.", "right": "issue:42831"}, {"accept": false, "left": "issue:43550", "reason": "torch.compile/SDPA failure in Bamba-9B-v2 vs position_ids buffer read error; unrelated code paths and symptoms.", "right": "issue:44295"}, {"accept": false, "left": "issue:42907", "reason": "Dequantized model save failure vs gguf_file breaking from_pretrained; both touch loading/saving, but the underlying bugs differ.", "right": "issue:43452"}, {"accept": false, "left": "issue:36296", "reason": "Tensor parallel training bug vs multi-label classification example returning empty results; clearly different subsystems and behaviors.", "right": "issue:43116"}, {"accept": false, "left": "issue:43927", "reason": "DiaConfig custom token IDs lost on save/load vs position_ids buffer access error; both are state-handling bugs, but not the same one.", "right": "issue:44295"}, {"accept": false, "left": "issue:44038", "reason": "Qwen3-VL-Moe / transformers 5.0 issue vs tokenizer_class mismatch in tokenizer_config.json; different components and failure modes.", "right": "issue:44297"}, {"accept": false, "left": "issue:43824", "reason": "Missing Qwen2_5_VLForConditionalGeneration import vs missing HybridCache import; both are import errors, but for 
different symbols and fixes.", "right": "issue:44351"}, {"accept": false, "left": "issue:44038", "reason": "Qwen3-VL-Moe compatibility issue vs FP8 save_pretrained for moe; related vocabulary, but different concrete bugs and likely different fixes.", "right": "issue:44222"}, {"accept": false, "left": "issue:43122", "reason": "Tokenizer output changed across versions vs Qwen3.5 flash-attention generation failure; no shared code path beyond general model behavior.", "right": "issue:44977"}, {"accept": false, "left": "issue:43278", "reason": "Embedding dtype mismatch in train/eval vs incorrect pipeline-parallel LLM output; different mechanisms and not mergeable as one fix.", "right": "issue:44945"}], "summary": "This cluster is heterogeneous: the items cover unrelated bugs in precision, tokenization, save/load, imports, and parallelism. None of the proposed soft pairs look like the same underlying issue or a plausible single PR fix."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:42371|issue:42831", "issue:43550|issue:44295", "issue:42907|issue:43452", "issue:36296|issue:43116", "issue:43927|issue:44295", "issue:44038|issue:44297", "issue:43824|issue:44351", "issue:44038|issue:44222", "issue:43122|issue:44977", "issue:43278|issue:44945"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6418, "estimated_input_tokens": 3081, "item_count": 17, "node_count": 17, "serialized_chars": 12322, "soft_pair_count": 9}, "cached_at": "2026-04-14T13:03:11Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "c0a9f32d2c872ac491f8e7045321c7458757f2ff5b816eb96fd1d7b2273c8a7c", 
"prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:38617", "issue:42222", "issue:42673", "issue:42907", "issue:43064", "issue:43441", "issue:43673", "issue:43784", "issue:43937", "issue:44075", "issue:44222", "issue:44485", "issue:44912", "issue:44933", "issue:44936", "issue:45072", "issue:45278"], "result": {"analyst_result": {"best_issue_reason": "#45278 is the best single issue to represent the cluster because it is open, broad, and likely covers a wider slice of the import-regression surface than the more specific one-off failures.", "best_pr_reason": null, "canonical_issue_reason": "#45278 is the broadest, most central user-facing report in the set: multiple import failures after upgrading Transformers, which best fits the cluster\u2019s only partial overlap around API/import breakage.", "canonical_pr_reason": null, "confidence": 0.84, "soft_edge_verdicts": [{"accept": false, "left": "issue:42222", "reason": "Different models and different failure modes: vitpose imports/breakage versus Ministral-3 FlashAttention regression. 
No shared code path or same fix.", "right": "issue:43441"}, {"accept": false, "left": "issue:42907", "reason": "Both involve saving quantized models, but one is dequantized Ministral/Devstrals saving and the other is FP8 MoE save_pretrained; too different to treat as the same bug.", "right": "issue:44222"}, {"accept": false, "left": "issue:42907", "reason": "Saving dequantized models and MXFP4 load fallback are separate quantization workflows with different symptoms and likely different fixes.", "right": "issue:44912"}, {"accept": false, "left": "issue:38617", "reason": "Both are import-error reports, but #38617 is a specific missing symbol while #45278 is a broad multi-import regression after upgrade; overlap is too generic.", "right": "issue:45278"}, {"accept": false, "left": "issue:43784", "reason": "Both mention import errors, but they affect different symbols and different downstream packages/subsystems; not the same underlying bug.", "right": "issue:44933"}, {"accept": false, "left": "issue:43064", "reason": "Trainer/FSDP optimizer-state corruption and GenerationMixin cache absence are unrelated v5 regressions in different code paths.", "right": "issue:43673"}, {"accept": false, "left": "issue:43937", "reason": "Same model family, but one is invalid GenerationConfig validation and the other is RoPE implementation; distinct bugs.", "right": "issue:44485"}, {"accept": false, "left": "issue:42673", "reason": "VRAM leak in threaded Qwen3 inference and bfloat16 dtype mismatch in SwitchTransformers/TimmWrapperModel are unrelated runtime issues.", "right": "issue:45072"}, {"accept": false, "left": "issue:44075", "reason": "Optimizer SGD arguments not being used and trainer.evaluate() failing after train() are different trainer/optimizer problems with no clear common fix.", "right": "issue:44936"}], "summary": "Mostly unrelated closed/open bug reports with a few superficial theme overlaps (imports, quantization save/load, GLM-5 generation). 
No soft pair looks like a true duplicate or mergeable PR-equivalent; the broadest umbrella issue is the import-regression report."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:42222|issue:43441", "issue:42907|issue:44222", "issue:42907|issue:44912", "issue:38617|issue:45278", "issue:43784|issue:44933", "issue:43064|issue:43673", "issue:43937|issue:44485", "issue:42673|issue:45072", "issue:44075|issue:44936"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6898, "estimated_input_tokens": 3321, "item_count": 18, "node_count": 18, "serialized_chars": 13281, "soft_pair_count": 10}, "cached_at": "2026-04-14T13:03:26Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "318c56f50ce505914d9faad9f09e74a2f91706d8a7c0ce05700fd20a99a9b55d", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:22355", "issue:29127", "issue:33453", "issue:39401", "issue:42371", "issue:42673", "issue:43116", "issue:43493", "issue:43825", "issue:43874", "issue:43906", "issue:43931", "issue:43937", "issue:44484", "issue:44779", "issue:44945", "issue:44977", "issue:45310"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.95, "soft_edge_verdicts": [{"accept": false, "left": "issue:43116", "reason": "Different components and failure modes: an example-script multi-label classification result bug vs a SigLIP2 implementation discrepancy. 
No shared code path or fix.", "right": "issue:43493"}, {"accept": false, "left": "issue:42371", "reason": "Unrelated concerns: TF32 configuration guidance vs a GLM46V image-processor method missing an attribute. Similar only in being user-facing errors.", "right": "issue:43874"}, {"accept": false, "left": "issue:22355", "reason": "Completely different areas: missing transformers.onnx module vs LayoutLMv3 error-message clarity for insufficient box information.", "right": "issue:29127"}, {"accept": false, "left": "issue:43116", "reason": "Both mention pipeline/example usage, but one is a bad multi-label example outcome and the other is a v5 translation-task support message regression. Not the same bug.", "right": "issue:43825"}, {"accept": false, "left": "issue:39401", "reason": "Both are tokenizer-related regressions, but one is Qwen3 offset_mapping and the other is Deepseek tokenization behavior in v5. Different models and symptoms.", "right": "issue:44779"}, {"accept": false, "left": "issue:33453", "reason": "Tokenization-loading regression vs incorrect offset_mapping for Qwen3. 
Related subsystem, but not the same defect.", "right": "issue:39401"}, {"accept": false, "left": "issue:43931", "reason": "Model weight-shape mismatch during loading is distinct from incorrect LLM output under pipeline parallelism; different code paths and remedies.", "right": "issue:44945"}, {"accept": false, "left": "issue:43937", "reason": "Invalid GenerationConfig validation and flash-attention generation failure are separate issues affecting different parts of generation.", "right": "issue:44977"}, {"accept": false, "left": "issue:44484", "reason": "save_pretrained shard-size behavior is unrelated to Qwen3.5 MoE from_pretrained loading errors.", "right": "issue:45310"}, {"accept": false, "left": "issue:42673", "reason": "VRAM leak with multiple dataloader threads is not the same as an isolated reproduction of another issue; insufficient evidence of a shared root cause.", "right": "issue:43906"}], "summary": "This cluster is not a true duplicate set: the items span unrelated bugs in tokenizers, model loading, generation, image processing, TF32 settings, and example scripts. 
The soft-similarity pairs are only superficially related by broad wording, not by the same underlying defect or fix."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43116|issue:43493", "issue:42371|issue:43874", "issue:22355|issue:29127", "issue:43116|issue:43825", "issue:39401|issue:44779", "issue:33453|issue:39401", "issue:43931|issue:44945", "issue:43937|issue:44977", "issue:44484|issue:45310", "issue:42673|issue:43906"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6918, "estimated_input_tokens": 3331, "item_count": 18, "node_count": 18, "serialized_chars": 13321, "soft_pair_count": 9}, "cached_at": "2026-04-14T13:03:39Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "89e76b92ce202fe29aad6d1044473d306e57bf9b2a565a68b42d26ee795adb7f", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:38617", "issue:39401", "issue:41628", "issue:42673", "issue:43408", "issue:43421", "issue:43450", "issue:43577", "issue:43742", "issue:43986", "issue:44117", "issue:44291", "issue:44410", "issue:44829", "issue:45071", "issue:45081", "issue:45216", "issue:45310"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.98, "soft_edge_verdicts": [{"accept": false, "left": "issue:39401", "reason": "Both concern tokenizers, but one is a Qwen3 offset_mapping bug and the other is a runtime special-token/post-processor sync issue; different underlying failures.", "right": "issue:43421"}, {"accept": false, "left": "issue:43450", "reason": "Both involve video models, but one is a batched shape bug 
in processors while the other is a crash due to missing torchvision; not the same code-path problem.", "right": "issue:43986"}, {"accept": false, "left": "issue:44291", "reason": "Both relate to v5.4.0/loading, but one is init_empty_weights passing an unexpected argument and the other is PretrainedConfig type-checking regression; distinct causes.", "right": "issue:45071"}, {"accept": false, "left": "issue:38617", "reason": "Both are import errors, but they are for different symbols from different modules; no shared underlying bug.", "right": "issue:41628"}, {"accept": false, "left": "issue:44117", "reason": "Both are tokenizer-loading failures, but one is a mapping registry returning None and the other is a Mistral regex patch crash; separate defects.", "right": "issue:45081"}, {"accept": false, "left": "issue:42673", "reason": "Completely different symptoms and subsystems: VRAM leak in Qwen3ForCausalLM vs dtype mismatch when loading BLIP2.", "right": "issue:43577"}, {"accept": false, "left": "issue:44829", "reason": "One is a flash_attention_3 training degeneration bug; the other is a Qwen3.5 save_pretrained checkpoint regression. Not mergeable as one fix.", "right": "issue:45216"}, {"accept": false, "left": "issue:44410", "reason": "Both mention Qwen3 family, but one is missing layer projections in qwen3next and the other is a Qwen3.5 MoE from_pretrained loading error; different code paths.", "right": "issue:45310"}, {"accept": false, "left": "issue:43408", "reason": "Different models and failure modes: a model-type warning for sam3_video/sam3_tracker versus a KeyError loading MobileLLM-125M.", "right": "issue:43742"}], "summary": "This cluster is not a true duplicate set: it mixes unrelated import errors, tokenizer bugs, video-processing issues, model-loading regressions, dtype/state problems, and one training-path issue. 
No single issue cleanly represents the whole cluster."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:39401|issue:43421", "issue:43450|issue:43986", "issue:44291|issue:45071", "issue:38617|issue:41628", "issue:44117|issue:45081", "issue:42673|issue:43577", "issue:44829|issue:45216", "issue:44410|issue:45310", "issue:43408|issue:43742"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6704, "estimated_input_tokens": 3224, "item_count": 18, "node_count": 18, "serialized_chars": 12894, "soft_pair_count": 9}, "cached_at": "2026-04-14T13:04:01Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "0031fd64183d509323ecba6840bd5a098aaf2c610b1c9ed45fa9f618154e73a8", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:38617", "issue:41628", "issue:42175", "issue:42222", "issue:42371", "issue:42617", "issue:42673", "issue:43066", "issue:43122", "issue:43582", "issue:43784", "issue:43937", "issue:44220", "issue:44261", "issue:44496", "issue:44933", "issue:45278", "issue:45341"], "result": {"analyst_result": {"best_issue_reason": "Issue 45278 is the best single representative of the cluster because it captures the widest symptom surface, though it is not an exact duplicate of the other reports.", "best_pr_reason": null, "canonical_issue_reason": "Issue 45278 is the broadest umbrella candidate because it describes multiple import errors after a version upgrade, but the set is still too diverse for a true duplicate canonicalization.", "canonical_pr_reason": null, "confidence": 0.93, "soft_edge_verdicts": [{"accept": false, "left": "issue:38617", "reason": "Different failures: a missing import 
from configuration_utils versus an issue in _torch_extract_fbank_features(). No shared concrete code path.", "right": "issue:44220"}, {"accept": false, "left": "issue:42673", "reason": "VRAM leak in Qwen3ForCausalLM multi-thread use is unrelated to a TypeError in caching_allocator_warmup on Apple Silicon.", "right": "issue:43582"}, {"accept": false, "left": "issue:42175", "reason": "Backend/dependency packaging problem does not match vitpose model breakage; different symptoms and likely different fixes.", "right": "issue:42222"}, {"accept": false, "left": "issue:42371", "reason": "TF32 API warning/behavior request is not the same as MLA q_a_layernorm precision error from missing rms_norm_eps.", "right": "issue:44261"}, {"accept": false, "left": "issue:41628", "reason": "Both are import failures, but the missing symbol and root cause differ: AutoImageProcessor export vs sentence-transformers importing nn.", "right": "issue:43784"}, {"accept": false, "left": "issue:43937", "reason": "GenerationConfig validation for GLM-5 is unrelated to unrecognized model_type in a different model repo.", "right": "issue:44496"}, {"accept": false, "left": "issue:44933", "reason": "One is a specific nonexistent image_utils import; the other is a broad post-upgrade import-error report, not the same concrete bug.", "right": "issue:45278"}, {"accept": false, "left": "issue:42617", "reason": "3d_parallel.py execution failure and a testing_utils.py bug are different code paths and different problem classes.", "right": "issue:45341"}, {"accept": false, "left": "issue:43066", "reason": "Tokenizer decoder-type issue and tokenization-output differences may be adjacent v5 regressions, but they are not clearly the same bug or mergeable into one fix.", "right": "issue:43122"}], "summary": "This cluster is mostly heterogeneous: import/export regressions, tokenizer behavior changes, model config validation issues, backend packaging problems, and unrelated runtime bugs. 
I do not see any soft pair that looks like the same underlying bug or change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:38617|issue:44220", "issue:42673|issue:43582", "issue:42175|issue:42222", "issue:42371|issue:44261", "issue:41628|issue:43784", "issue:43937|issue:44496", "issue:44933|issue:45278", "issue:42617|issue:45341", "issue:43066|issue:43122"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6780, "estimated_input_tokens": 3262, "item_count": 18, "node_count": 18, "serialized_chars": 13045, "soft_pair_count": 9}, "cached_at": "2026-04-14T13:04:19Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "af0bac641dd73284dda88d333744e5d41c2593a9c9ea91f0b903d2e7f95c0fe5", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:42222", "issue:42994", "issue:43012", "issue:43097", "issue:43408", "issue:43441", "issue:43673", "issue:43874", "issue:43986", "issue:44261", "issue:44263", "issue:44484", "issue:44485", "issue:44493", "issue:44829", "issue:44964", "issue:45278", "issue:45341"], "result": {"analyst_result": {"best_issue_reason": "Issue 45278 is the broadest and most generic of the set, but the cluster is too heterogeneous to have a true canonical issue; it is only a weak representative for the import-error theme.", "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.95, "soft_edge_verdicts": [{"accept": false, "left": "issue:44964", "reason": "Both are loading/import problems, but one is specific to microsoft/Phi-4-multimodal-instruct while the other is a broad upgrade regression from 4.57.0 to 5.5.0; not the same 
underlying bug.", "right": "issue:45278"}, {"accept": false, "left": "issue:43673", "reason": "GenerationMixin cache/chunked_prefill failure and a testing_utils bug are unrelated code paths with different symptoms and scope.", "right": "issue:45341"}, {"accept": false, "left": "issue:43097", "reason": "One is about a removed tie_embeddings_and_encoder_decoder config option, the other about unexpected position_id keys; both are migration/config issues but not the same defect.", "right": "issue:44493"}, {"accept": false, "left": "issue:43874", "reason": "Both mention missing config fields in multimodal/model code, but they affect different attributes and different failure modes (AttributeError vs precision warning/error).", "right": "issue:44261"}, {"accept": false, "left": "issue:43441", "reason": "FlashAttention is the shared theme, but one is a Ministral-3 v5 RC failure and the other is degenerate training in sequence classification; not the same concrete bug.", "right": "issue:44829"}, {"accept": false, "left": "issue:43986", "reason": "AutoProcessor/torchvision crash and save_pretrained shard-size behavior are unrelated.", "right": "issue:44484"}, {"accept": false, "left": "issue:42222", "reason": "Broken vitpose model loading and quantized model saving failure are different subsystems and failure modes.", "right": "issue:42994"}, {"accept": false, "left": "issue:44263", "reason": "Both are GLM-related, but one concerns torch.split return values in a MoE indexer and the other RoPE implementation; not mergeable as one bug.", "right": "issue:44485"}, {"accept": false, "left": "issue:43012", "reason": "A bfloat16 compile warning and a sam3_video/sam3_tracker model-type mismatch are unrelated issues.", "right": "issue:43408"}], "summary": "These issues are largely unrelated one-off bugs across different model families and subsystems. The soft pairs share at most a broad theme (e.g. 
loading errors, GLM, flash attention) but not the same concrete failure mode, so none should be merged as duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44964|issue:45278", "issue:43673|issue:45341", "issue:43097|issue:44493", "issue:43874|issue:44261", "issue:43441|issue:44829", "issue:43986|issue:44484", "issue:42222|issue:42994", "issue:44263|issue:44485", "issue:43012|issue:43408"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6504, "estimated_input_tokens": 3124, "item_count": 17, "node_count": 17, "serialized_chars": 12495, "soft_pair_count": 9}, "cached_at": "2026-04-14T13:04:46Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "c7530b56a1e1de810f5f9a4af3c258a763930789f72f4bbda7710632a02a3e21", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:41628", "issue:42222", "issue:43097", "issue:43452", "issue:43784", "issue:43874", "issue:43931", "issue:43976", "issue:43986", "issue:44117", "issue:44263", "issue:44410", "issue:44496", "issue:44987", "issue:45003", "issue:45071", "issue:45341"], "result": {"analyst_result": {"best_issue_reason": "If one issue must represent the cluster, issue #45071 is the broadest regression-style report and has the strongest discussion/reference signal, but it is still not a good duplicate umbrella for the rest.", "best_pr_reason": null, "canonical_issue_reason": "No single issue is a true canonical duplicate for this cluster because the reports span different subsystems and failure modes.", "canonical_pr_reason": null, "confidence": 0.91, "soft_edge_verdicts": [{"accept": false, "left": "issue:41628", "reason": "Different 
failures: import-time AutoImageProcessor availability vs PretrainedConfig type-checking regression.", "right": "issue:45071"}, {"accept": false, "left": "issue:45003", "reason": "Unrelated subsystems: sys.modules access in modeling_utils vs a testing_utils bug.", "right": "issue:45341"}, {"accept": false, "left": "issue:43784", "reason": "Different symptoms and causes: sentence-transformers import NameError vs Python version compatibility breakage.", "right": "issue:43976"}, {"accept": false, "left": "issue:43097", "reason": "Distinct issues: removed config flag vs gguf_file loading path regression.", "right": "issue:43452"}, {"accept": false, "left": "issue:42222", "reason": "Both are model-loading related, but one is a vitpose breakage and the other is an unrecognized-model/config issue for OLMo.", "right": "issue:44496"}, {"accept": false, "left": "issue:44117", "reason": "Both involve from_pretrained, but the reported failures are not clearly the same code-path bug.", "right": "issue:44987"}, {"accept": false, "left": "issue:43931", "reason": "Different model families and different structural errors: Qwen3-VL weight mismatch vs qwen3next missing projections.", "right": "issue:44410"}, {"accept": false, "left": "issue:43986", "reason": "AutoProcessor/torchvision loading crash is unrelated to a testing utility bug.", "right": "issue:45341"}, {"accept": false, "left": "issue:43874", "reason": "Different GLM-related failures: missing image patch method vs a torch.split return-value issue.", "right": "issue:44263"}], "summary": "This cluster is not a duplicate set; it groups several unrelated Transformers bug reports. 
The soft-similarity pairs share broad themes like loading/import regressions, but none look like the same underlying bug or a PR-worthy merge target."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:41628|issue:45071", "issue:45003|issue:45341", "issue:43784|issue:43976", "issue:43097|issue:43452", "issue:42222|issue:44496", "issue:44117|issue:44987", "issue:43931|issue:44410", "issue:43986|issue:45341", "issue:43874|issue:44263"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7036, "estimated_input_tokens": 3390, "item_count": 18, "node_count": 18, "serialized_chars": 13558, "soft_pair_count": 10}, "cached_at": "2026-04-14T13:05:02Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "e8a2a6c5c31faff559705cda4e1665c4c977488cd0f00a452eb2115b51af613e", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:43064", "issue:43335", "issue:43408", "issue:43531", "issue:43931", "issue:43976", "issue:43986", "issue:44038", "issue:44261", "issue:44315", "issue:44368", "issue:44464", "issue:44485", "issue:44829", "issue:45071", "issue:45245", "issue:45341", "issue:45372"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "issue:43408", "reason": "Different model families and symptoms: sam3_video\u2192sam3_tracker config warning vs Qwen3.5 LoRA tie_word_embeddings warning.", "right": "issue:44368"}, {"accept": false, "left": "issue:43335", "reason": "Different bugs and code paths: SwitchTransformers sparse-layer creation vs 
PretrainedConfig type-checking regression.", "right": "issue:45071"}, {"accept": false, "left": "issue:43986", "reason": "Both are loading/import failures, but for different causes and targets: missing torchvision for video AutoProcessor vs mistral_common ReasoningEffort import breaking Gemma 4 processor loading.", "right": "issue:45372"}, {"accept": false, "left": "issue:43931", "reason": "Same broad Qwen3-VL area, but not the same underlying issue: weight-shape mismatch on a specific model vs a transformers 5.0 / Qwen3-VL-MoE bug.", "right": "issue:44038"}, {"accept": false, "left": "issue:44829", "reason": "Unrelated failures: flash_attention_3 causes degenerate training, while the other is an embedding/category cardinality RuntimeError.", "right": "issue:45245"}, {"accept": false, "left": "issue:43976", "reason": "Python version compatibility issue vs generic PretrainedConfig type-check regression; different failure modes and fixes.", "right": "issue:45071"}, {"accept": false, "left": "issue:43064", "reason": "Distinct training-stack bugs: FSDP2+PEFT cpu_ram_efficient_loading corrupts optimizer state, while Liger Kernel is not applied with model_init.", "right": "issue:44315"}, {"accept": false, "left": "issue:44261", "reason": "Different model-specific math/config issues: missing rms_norm_eps for MLA q_a_layernorm vs GLM-5 RoPE implementation discussion.", "right": "issue:44485"}, {"accept": false, "left": "issue:44464", "reason": "Chunked generation inconsistency with compiled forward is unrelated to the PretrainedConfig type-checking breakage.", "right": "issue:45071"}, {"accept": false, "left": "issue:43531", "reason": "Completely different scope: Qwen3-MoE sliding_window behavior vs a small testing_utils bug.", "right": "issue:45341"}], "summary": "The items are a heterogeneous mix of unrelated bugs across different models, configs, and runtime paths. 
All soft-similarity pairs appear to be false positives rather than true duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43408|issue:44368", "issue:43335|issue:45071", "issue:43986|issue:45372", "issue:43931|issue:44038", "issue:44829|issue:45245", "issue:43976|issue:45071", "issue:43064|issue:44315", "issue:44261|issue:44485", "issue:44464|issue:45071", "issue:43531|issue:45341"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7082, "estimated_input_tokens": 3413, "item_count": 18, "node_count": 18, "serialized_chars": 13651, "soft_pair_count": 11}, "cached_at": "2026-04-14T13:05:21Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "1e74e5531bfd33e5fe4d11d019b7341e67d88b39c1315504ab755669ab2b52b4", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:38617", "issue:43066", "issue:43097", "issue:43502", "issue:43824", "issue:43906", "issue:43986", "issue:43992", "issue:44038", "issue:44062", "issue:44117", "issue:44351", "issue:44410", "issue:44704", "issue:44964", "issue:45103", "issue:45341", "issue:45375"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.95, "soft_edge_verdicts": [{"accept": false, "left": "issue:38617", "reason": "Both are import errors, but they concern different missing symbols (`layer_type_validation` vs `HybridCache`) and different failure paths.", "right": "issue:44351"}, {"accept": false, "left": "issue:44704", "reason": "`AutoProcessor.from_pretrained` kwargs forwarding and `_process_kwargs_parameters` docstring generation are 
unrelated bugs.", "right": "issue:45103"}, {"accept": false, "left": "issue:44410", "reason": "Both are Qwen-related, but one is a missing projection layer in a model and the other is a config field being dropped by `@strict`; different code paths and fixes.", "right": "issue:45375"}, {"accept": false, "left": "issue:44062", "reason": "A tokenizer `AddedToken` construction error is not the same bug as `TOKENIZER_MAPPING_NAMES` returning `None` during `from_pretrained`.", "right": "issue:44117"}, {"accept": false, "left": "issue:43502", "reason": "`local_files_only=True` network leakage and missing `embed_tokens.weight` in UMT5 are unrelated load-time issues.", "right": "issue:43992"}, {"accept": false, "left": "issue:43986", "reason": "Video model loading without torchvision and tokenizer mapping lookup returning `None` are different subsystems and symptoms.", "right": "issue:44117"}, {"accept": false, "left": "issue:38617", "reason": "Missing import of `layer_type_validation` and removal of `tie_embeddings_and_encoder_decoder` are separate API regressions.", "right": "issue:43097"}, {"accept": false, "left": "issue:44038", "reason": "Both mention Qwen/5.0, but one is a general Qwen3-VL-Moe regression and the other is a specific missing-projection bug in qwen3next; not the same concrete change.", "right": "issue:44410"}, {"accept": false, "left": "issue:44964", "reason": "Model loading failure for Phi-4 multimodal and a bug in `testing_utils.py` do not share the same underlying defect.", "right": "issue:45341"}, {"accept": false, "left": "issue:43824", "reason": "Both are multimodal import/load issues, but one is a missing model export and the other is a torchvision dependency/crash path; not mergeable as one fix.", "right": "issue:43986"}, {"accept": false, "left": "issue:43066", "reason": "Wrong tokenizer decoder type in v5 and an isolated reproduction of another issue are not the same bug.", "right": "issue:43906"}], "summary": "This cluster is a loose 
collection of unrelated Transformers issues (imports, tokenizer behavior, AutoProcessor/loading, config/model regressions, and a docstring utility crash). The soft pairs share superficial wording or broad subsystem names, but not the same concrete bug or fix path."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:38617|issue:44351", "issue:44704|issue:45103", "issue:44410|issue:45375", "issue:44062|issue:44117", "issue:43502|issue:43992", "issue:43986|issue:44117", "issue:38617|issue:43097", "issue:44038|issue:44410", "issue:44964|issue:45341", "issue:43824|issue:43986", "issue:43066|issue:43906"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6708, "estimated_input_tokens": 3226, "item_count": 17, "node_count": 17, "serialized_chars": 12903, "soft_pair_count": 10}, "cached_at": "2026-04-14T13:05:40Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "057b3341d64c642a00a60224f9e9413071d098be599264cbe75875360d193e30", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:43012", "issue:43097", "issue:43299", "issue:43352", "issue:43644", "issue:43825", "issue:43873", "issue:43901", "issue:43957", "issue:43992", "issue:44351", "issue:44368", "issue:44380", "issue:44410", "issue:44484", "issue:44655", "issue:44829"], "result": {"analyst_result": {"best_issue_reason": "issue:43873 is the strongest standalone triage target: it is open, has the most discussion/inbound references in this set, and describes a concrete quantization/offloading failure. 
It is not a duplicate of the other issues, though.", "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.94, "soft_edge_verdicts": [{"accept": false, "left": "issue:43957", "reason": "Different failures: one is `torch.device(\"meta\")` loading breakage for some models, the other is a missing `HybridCache` import symbol.", "right": "issue:44351"}, {"accept": false, "left": "issue:43992", "reason": "One is a model-specific missing `embed_tokens.weight` on load; the other asks why `max_shard_size` defaults to 50GB. Different code paths.", "right": "issue:44484"}, {"accept": false, "left": "issue:43097", "reason": "`tie_embeddings_and_encoder_decoder` removal is a config/API regression; quantization offloading failure is an unrelated runtime behavior issue.", "right": "issue:43873"}, {"accept": false, "left": "issue:43012", "reason": "Precision-warning during bfloat16 compilation is unrelated to degenerate training caused by `flash_attention_3`.", "right": "issue:44829"}, {"accept": false, "left": "issue:43299", "reason": "Qwen3VL MoE loading breakage and missing Qwen3Next projections are different model-specific implementation bugs.", "right": "issue:44410"}, {"accept": false, "left": "issue:43352", "reason": "Unsupported Flash Attention 2 for NemotronH is a model/backend compatibility issue; GPT2 attention scaling being ignored is a separate backend semantics bug.", "right": "issue:44380"}, {"accept": false, "left": "issue:43825", "reason": "The first is a misleading pipeline error message for translation tasks; the second is about saving Pipeline objects. 
Same area, but not the same bug.", "right": "issue:44655"}, {"accept": false, "left": "issue:43644", "reason": "Non-persistent buffers getting junk values is a state initialization bug; `max_shard_size` being 50GB is a save-pretrained default question.", "right": "issue:44484"}, {"accept": false, "left": "issue:43097", "reason": "One concerns a removed config field; the other is stale docs mentioning `return_all_scores`. Similar theme, different issue.", "right": "issue:43901"}, {"accept": false, "left": "issue:43012", "reason": "PyTorch precision warnings from bfloat16 compilation are unrelated to the `tie_word_embeddings` warning emitted during LoRA fine-tuning.", "right": "issue:44368"}], "summary": "This cluster is heterogeneous: the issues cover unrelated Transformers regressions, docs gaps, and model-specific loading/runtime problems. None of the provided soft pairs look like the same underlying bug/change, so all are rejected."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43957|issue:44351", "issue:43992|issue:44484", "issue:43097|issue:43873", "issue:43012|issue:44829", "issue:43299|issue:44410", "issue:43352|issue:44380", "issue:43825|issue:44655", "issue:43644|issue:44484", "issue:43097|issue:43901", "issue:43012|issue:44368"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6472, "estimated_input_tokens": 3108, "item_count": 17, "node_count": 17, "serialized_chars": 12430, "soft_pair_count": 9}, "cached_at": "2026-04-14T13:06:05Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "189bc482778919030f05662e652b718d055995a3b88d74e632ae98c0880813ba", 
"prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:42886", "issue:42907", "issue:43012", "issue:43064", "issue:43208", "issue:43352", "issue:43381", "issue:43525", "issue:43526", "issue:43550", "issue:43618", "issue:43937", "issue:43976", "issue:44117", "issue:44380", "issue:44655", "issue:44792"], "result": {"analyst_result": {"best_issue_reason": "issue:44117 is the best triage representative because it is a concrete, infrastructure-level failure with a clear code-path implication, making it more canonical than the other narrowly scoped or model-specific issues.", "best_pr_reason": null, "canonical_issue_reason": "issue:44117 is the closest thing to a representative core-library bug: it describes a generic loader assumption in `from_pretrained`, which is more central and reusable than the highly model- or workflow-specific reports elsewhere in the set.", "canonical_pr_reason": null, "confidence": 0.95, "soft_edge_verdicts": [{"accept": false, "left": "issue:42886", "reason": "Tokenizer offline cache loading and gradient-checkpointing eval-mode behavior are different subsystems with unrelated failure modes.", "right": "issue:43381"}, {"accept": false, "left": "issue:43937", "reason": "A GenerationConfig validation error in GLM-5 is not the same bug as a failed janus image-generation test; the model, symptom, and path differ.", "right": "issue:44792"}, {"accept": false, "left": "issue:42907", "reason": "Both involve saving, but one is dequantized model saving and the other is pipeline save_pretrained; they are different object types and likely different code paths.", "right": "issue:44655"}, {"accept": false, "left": "issue:43525", "reason": "Both mention config/tokenizer internals, but one is a missing `pad_token_id` on Llama4Config and the other is a tokenizer mapping `None` assumption; not the same underlying bug.", "right": "issue:44117"}, {"accept": false, "left": "issue:43012", "reason": "A PyTorch precision warning on bfloat16 compilation is not 
the same as a Bamba torch.compile SDPA failure; one is a warning, the other a functional model-specific bug.", "right": "issue:43550"}, {"accept": false, "left": "issue:43352", "reason": "Unsupported Flash Attention for Nemotron and a Python version compatibility break in Transformers 5.1.0 are unrelated problems.", "right": "issue:43976"}, {"accept": false, "left": "issue:43208", "reason": "xLSTM training blockers and BeitImageProcessorFast label reduction are different model/component bugs with no shared code path.", "right": "issue:43526"}, {"accept": false, "left": "issue:43064", "reason": "FSDP2/PEFT optimizer-state corruption and GPT2 attention scaling being ignored under SDPA/FlashAttention are distinct training vs attention-backend issues.", "right": "issue:44380"}, {"accept": false, "left": "issue:43618", "reason": "Both touch attention-related code, but CLIPOutput attentions not being assigned is a different symptom and path than GPT2 attention scaling being ignored.", "right": "issue:44380"}], "summary": "These issues are mostly unrelated; the soft similarities are superficial and do not indicate duplicate bugs or mergeable PRs. 
All soft edges should be rejected."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:42886|issue:43381", "issue:43937|issue:44792", "issue:42907|issue:44655", "issue:43525|issue:44117", "issue:43012|issue:43550", "issue:43352|issue:43976", "issue:43208|issue:43526", "issue:43064|issue:44380", "issue:43618|issue:44380"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6892, "estimated_input_tokens": 3318, "item_count": 18, "node_count": 18, "serialized_chars": 13270, "soft_pair_count": 10}, "cached_at": "2026-04-14T13:06:29Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "09285ebae85a43157e9b2404adf50b5a329722cf1ba73dc9934d8542f90debcf", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:36683", "issue:38617", "issue:43012", "issue:43299", "issue:43502", "issue:43582", "issue:43618", "issue:43756", "issue:43761", "issue:43867", "issue:44038", "issue:44230", "issue:44483", "issue:44485", "issue:44514", "issue:44683", "issue:45290", "issue:45381"], "result": {"analyst_result": {"best_issue_reason": "Issue 44483 is the broadest, highest-severity regression in the set and has the strongest connection to other Qwen/chat-template reports, but it still does not subsume the rest of the cluster.", "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.95, "soft_edge_verdicts": [{"accept": false, "left": "issue:44483", "reason": "Both involve Qwen/chat-template paths, but one is a /v1/chat/completions acceptance failure and the other is a batched apply_chat_template crash with padding=False. 
Different entry points and failure modes.", "right": "issue:44514"}, {"accept": false, "left": "issue:43618", "reason": "Both are CLIP vision issues, but one reports missing attentions in CLIPOutput while the other reports hidden_states=None despite output_hidden_states=True. Related area, not the same bug.", "right": "issue:43761"}, {"accept": false, "left": "issue:36683", "reason": "Gemma3Config missing vocab_size and Apple Silicon caching_allocator_warmup TypeError are unrelated bugs in different parts of the stack.", "right": "issue:43582"}, {"accept": false, "left": "issue:43012", "reason": "Both mention precision/torch behavior, but one is a bfloat16 compile warning and the other is a compiled flex_attention failure on torch>=2.9. Different code paths and symptoms.", "right": "issue:44683"}, {"accept": false, "left": "issue:43756", "reason": "Both mention RoPE, but they concern different models and different implementation bugs: Smollm3 layer dropping versus GLM-5 RoPE behavior.", "right": "issue:44485"}, {"accept": false, "left": "issue:44483", "reason": "Both touch chat templating, but one is a request acceptance issue at /v1/chat/completions and the other is a crash on assistant tool-call messages with no content. 
Not the same defect.", "right": "issue:45290"}, {"accept": false, "left": "issue:38617", "reason": "An import error for layer_type_validation and unexpected API calls despite local_files_only=True are unrelated regressions.", "right": "issue:43502"}, {"accept": false, "left": "issue:44483", "reason": "The Qwen2.5-VL video vision_position_ids bug is a model-input/positioning issue, not the same as the /v1/chat/completions request rejection.", "right": "issue:45381"}, {"accept": false, "left": "issue:43299", "reason": "Qwen3-VL-Moe loading breakage and state_dict-sorted load failures both affect loading, but they are distinct causes and likely separate fixes.", "right": "issue:43867"}, {"accept": false, "left": "issue:44038", "reason": "One is a v5 regression breaking Qwen3-VL-Moe loading; the other is an fp8 support request for qwen3-vl-fp8/qwen3.5 moe inference. Not duplicates.", "right": "issue:44230"}], "summary": "This cluster is a loose collection of mostly unrelated Transformers bugs. 
A few pairs share the same broad subsystem or model family, but the reported failures are different enough that none look like true duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44483|issue:44514", "issue:43618|issue:43761", "issue:36683|issue:43582", "issue:43012|issue:44683", "issue:43756|issue:44485", "issue:44483|issue:45290", "issue:38617|issue:43502", "issue:44483|issue:45381", "issue:43299|issue:43867", "issue:44038|issue:44230"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6574, "estimated_input_tokens": 3159, "item_count": 17, "node_count": 17, "serialized_chars": 12633, "soft_pair_count": 10}, "cached_at": "2026-04-14T13:07:01Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "7512f62d031b1d2790c8348b656ddbec6de7030a270acb0bb0caeeee521a17e8", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:36683", "issue:42994", "issue:43064", "issue:43065", "issue:43066", "issue:43352", "issue:43475", "issue:43867", "issue:43901", "issue:43937", "issue:44077", "issue:44220", "issue:44483", "issue:44492", "issue:44655", "issue:45216", "issue:45381"], "result": {"analyst_result": {"best_issue_reason": "Issue 44655 is the clearest, most actionable, and broadest user-facing save_pretrained failure in the set, so it is the best single representative issue if one must be chosen.", "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "issue:43901", "reason": "Both touch pipelines/docs, but one is a docs mismatch about return_all_scores while the other is a 
save_pretrained failure; different bugs and code paths.", "right": "issue:44655"}, {"accept": false, "left": "issue:42994", "reason": "Both involve saving, but quantized model saving and pipeline object serialization are distinct failures with different mechanics.", "right": "issue:44655"}, {"accept": false, "left": "issue:43065", "reason": "Completely unrelated subsystems: SAM3 pixel decoder implementation vs chat/completions request validation.", "right": "issue:44483"}, {"accept": false, "left": "issue:36683", "reason": "Both are attribute errors, but in different model families and different missing fields; not the same underlying bug.", "right": "issue:43475"}, {"accept": false, "left": "issue:43867", "reason": "Both concern checkpoint save/load correctness, but one is a sorted-state_dict load error and the other is a bad saved checkpoint regression for Qwen3.5; different code paths.", "right": "issue:45216"}, {"accept": false, "left": "issue:43352", "reason": "Flash-Attention support gating and GenerationConfig validation are unrelated issues.", "right": "issue:43937"}, {"accept": false, "left": "issue:44220", "reason": "Audio feature extraction internals vs pipeline serialization; no shared underlying bug.", "right": "issue:44655"}, {"accept": false, "left": "issue:43066", "reason": "Tokenizer decoder-type mismatch and Qwen2.5-VL video position IDs are separate model/runtime problems.", "right": "issue:45381"}, {"accept": false, "left": "issue:43064", "reason": "Distributed trainer optimizer-state corruption and a cache-strategy typo are unrelated.", "right": "issue:44492"}, {"accept": false, "left": "issue:36683", "reason": "Gemma config missing vocab_size and PatchTSMixer post_init policy are different model/config concerns with no common fix.", "right": "issue:44077"}], "summary": "This cluster is heterogeneous: the items span unrelated model/config, saving/loading, training, docs, and model-specific runtime issues. 
None of the soft pairs look like true duplicates or the same underlying fix."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43901|issue:44655", "issue:42994|issue:44655", "issue:43065|issue:44483", "issue:36683|issue:43475", "issue:43867|issue:45216", "issue:43352|issue:43937", "issue:44220|issue:44655", "issue:43066|issue:45381", "issue:43064|issue:44492", "issue:36683|issue:44077"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7026, "estimated_input_tokens": 3385, "item_count": 18, "node_count": 18, "serialized_chars": 13537, "soft_pair_count": 11}, "cached_at": "2026-04-14T13:07:25Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "4de75a367376692b762d0da511e4f669559e97fff0cebd107ef9c45a9064a2d9", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:36683", "issue:41669", "issue:43010", "issue:43352", "issue:43441", "issue:43525", "issue:43618", "issue:43976", "issue:43992", "issue:44077", "issue:44188", "issue:44242", "issue:44246", "issue:44380", "issue:44938", "issue:45216", "issue:45278", "issue:45310"], "result": {"analyst_result": {"best_issue_reason": "Issue 45278 is the broadest and most representative issue for the cluster, but only weakly so; most other items are narrower model- or backend-specific regressions.", "best_pr_reason": null, "canonical_issue_reason": "Issue 45278 is the most umbrella-like user-facing report in the set (version-upgrade/import failures) and is the closest thing to a cluster anchor, though the overall set is heterogeneous.", "canonical_pr_reason": null, "confidence": 0.82, "soft_edge_verdicts": [{"accept": false, "left": "issue:43992", 
"reason": "Both are from_pretrained loading failures, but they affect different models and likely different missing keys/code paths.", "right": "issue:45310"}, {"accept": false, "left": "issue:44077", "reason": "One is about patchtsmixer post_init semantics; the other is GPT-2 attention scaling under SDPA/FlashAttention. Same broad area, not the same bug.", "right": "issue:44380"}, {"accept": false, "left": "issue:36683", "reason": "Both are missing config attributes, but on different model configs and different attributes; these are separate model-specific regressions.", "right": "issue:43525"}, {"accept": false, "left": "issue:43976", "reason": "Both concern Python-version compatibility, but the failures are in different versions and likely different import/load breakages.", "right": "issue:44938"}, {"accept": false, "left": "issue:44188", "reason": "Both mention attention/backend behavior, but they target different bugs and code paths; not mergeable as one fix.", "right": "issue:44380"}, {"accept": false, "left": "issue:43618", "reason": "CLIP attentions assignment and Qwen3.5 checkpoint saving are unrelated subsystems and failure modes.", "right": "issue:45216"}, {"accept": false, "left": "issue:36683", "reason": "Missing config attributes vs MoE load-balancing loss logic are unrelated.", "right": "issue:44242"}, {"accept": false, "left": "issue:43010", "reason": "One is about no_grad decoration on update methods; the other is generic import failures after upgrade. Different code paths and fixes.", "right": "issue:45278"}, {"accept": false, "left": "issue:43441", "reason": "Both involve FlashAttention/backend behavior, but one is unsupported-model failure and the other is attention scaling drift. 
Distinct bugs.", "right": "issue:44380"}, {"accept": false, "left": "issue:36683", "reason": "Different models and different missing-support/config-attribute problems.", "right": "issue:43352"}, {"accept": false, "left": "issue:41669", "reason": "Both are about import cost, but one is a concrete import-* performance regression and the other is a generic slow-import report; not clearly the same underlying issue.", "right": "issue:44246"}], "summary": "This cluster is mostly a loose collection of unrelated Transformers bug reports. The soft pairs share broad themes like import performance, config regressions, or FlashAttention/backend behavior, but they do not look like the same underlying bug or change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43992|issue:45310", "issue:44077|issue:44380", "issue:36683|issue:43525", "issue:43976|issue:44938", "issue:44188|issue:44380", "issue:43618|issue:45216", "issue:36683|issue:44242", "issue:43010|issue:45278", "issue:43441|issue:44380", "issue:36683|issue:43352", "issue:41669|issue:44246"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7058, "estimated_input_tokens": 3401, "item_count": 18, "node_count": 18, "serialized_chars": 13603, "soft_pair_count": 11}, "cached_at": "2026-04-14T13:07:46Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "ba845e07073165a5246e2dea93bdbe02485821fbe6d4e783a628181f1bf5d148", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:36683", "issue:41669", "issue:43010", "issue:43064", "issue:43352", "issue:43618", "issue:43644", "issue:43867", "issue:43931", "issue:43976", "issue:44077", "issue:44484", 
"issue:44554", "issue:44938", "issue:45071", "issue:45357", "issue:45375", "issue:45381"], "result": {"analyst_result": {"best_issue_reason": "Issue 45071 has the strongest general applicability and the most cluster-level traction, so it is the best single issue to keep if one must be chosen.", "best_pr_reason": null, "canonical_issue_reason": "No true duplicate cluster here; issue 45071 is the broadest, most central regression report and the best anchor only by overall signal.", "canonical_pr_reason": null, "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "issue:43352", "reason": "Different model-specific loading failures: Nemotron Flash Attention 2 support vs Qwen3-VL weight-shape mismatch.", "right": "issue:43931"}, {"accept": false, "left": "issue:43867", "reason": "State-dict sorting load error and a missing config field dropped by @strict are different serialization/config bugs.", "right": "issue:45375"}, {"accept": false, "left": "issue:36683", "reason": "Gemma3Config missing vocab_size is unrelated to the FSDP2/PEFT optimizer-state corruption on training ranks.", "right": "issue:43064"}, {"accept": false, "left": "issue:44938", "reason": "Python 3.14 import/load failure and PretrainedConfig type-checking breakage are different compatibility regressions.", "right": "issue:45071"}, {"accept": false, "left": "issue:43867", "reason": "Sorted state_dict loading error vs incorrect visual encoder keys saved by Qwen3.5 are different save/load issues.", "right": "issue:45357"}, {"accept": false, "left": "issue:43010", "reason": "@torch.no_grad on cache/layer updates and Python 3.9/3.10 support failure are unrelated changes.", "right": "issue:43976"}, {"accept": false, "left": "issue:44938", "reason": "Python 3.14 loading failure is not the same as the qwen2.5-vl video position-id bug.", "right": "issue:45381"}, {"accept": false, "left": "issue:43618", "reason": "CLIPOutput attentions assignment and patchtsmixer post_init restrictions affect 
different code paths.", "right": "issue:44077"}, {"accept": false, "left": "issue:41669", "reason": "Import-star performance cleanup is unrelated to the MPS attention correctness bug.", "right": "issue:44554"}, {"accept": false, "left": "issue:43644", "reason": "Non-persistent buffer junk in v5.0.0 is a different failure mode from Python 3.14 import/load problems.", "right": "issue:44938"}, {"accept": false, "left": "issue:43867", "reason": "A load error when state_dict is sorted is unrelated to the max_shard_size default question in save_pretrained().", "right": "issue:44484"}], "summary": "These items are a loose set of unrelated transformers regressions; none of the proposed soft pairs look like the same underlying bug/change. No PRs are present."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43352|issue:43931", "issue:43867|issue:45375", "issue:36683|issue:43064", "issue:44938|issue:45071", "issue:43867|issue:45357", "issue:43010|issue:43976", "issue:44938|issue:45381", "issue:43618|issue:44077", "issue:41669|issue:44554", "issue:43644|issue:44938", "issue:43867|issue:44484"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6998, "estimated_input_tokens": 3371, "item_count": 18, "node_count": 18, "serialized_chars": 13482, "soft_pair_count": 11}, "cached_at": "2026-04-14T13:08:06Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "bb1a2efd3e8ec2614a8e52d32c826fb8cf95b709a7cc1fb6f3036a3865655854", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:28282", "issue:33453", "issue:36296", "issue:38617", "issue:42890", "issue:43010", "issue:43232", "issue:43502", "issue:43575", 
"issue:43618", "issue:43992", "issue:44062", "issue:44075", "issue:44112", "issue:44857", "issue:44964", "issue:44977", "issue:45341"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "issue:38617", "reason": "Different failures: missing `layer_type_validation` import vs missing `embed_tokens.weight` in UMT5 loading. Same area (loading) but not the same bug.", "right": "issue:43992"}, {"accept": false, "left": "issue:36296", "reason": "Both mention tensor parallelism, but one is a general training bug and the other is an OOM when loading a specific Qwen2 model. Not the same underlying issue.", "right": "issue:43575"}, {"accept": false, "left": "issue:42890", "reason": "One is a flaky integration test due to missing `set_seed`; the other is a model load failure for Phi-4 multimodal. Different problem classes.", "right": "issue:44964"}, {"accept": false, "left": "issue:42890", "reason": "Test flakiness from missing seeds is unrelated to a CI failure caused by stale device override behavior.", "right": "issue:44112"}, {"accept": false, "left": "issue:43618", "reason": "Both involve model outputs/losses, but CLIP attentions assignment and an AMP/CUDA crash in LwDetrImageLoss are distinct code paths and symptoms.", "right": "issue:44857"}, {"accept": false, "left": "issue:28282", "reason": "AutoModel PyTorch import error is unrelated to a tokenizer-loading regression.", "right": "issue:33453"}, {"accept": false, "left": "issue:43618", "reason": "CLIP attentions missing vs Qwen3.5 flash-attention generation failure are different models and different bugs.", "right": "issue:44977"}, {"accept": false, "left": "issue:43502", "reason": "`local_files_only=True` still causing requests is a networking/cache bug, not the same as `AddedToken(..., special=...)` argument duplication.", "right": "issue:44062"}, {"accept": 
false, "left": "issue:43010", "reason": "`@torch.no_grad` on cache/layer updates is unrelated to SGD optimizer arguments being ignored.", "right": "issue:44075"}, {"accept": false, "left": "issue:42890", "reason": "A flaky model integration test is not the same as a small `testing_utils.py` bug, even though both touch tests.", "right": "issue:45341"}, {"accept": false, "left": "issue:43010", "reason": "Both touch generation internals, but one is about no-grad on cache/layer updates and the other is about `_update_model_kwargs_for_generation` after `sync_gpus`. Different fixes.", "right": "issue:43232"}], "summary": "These issues are a loose topical cluster around Transformers, but they describe distinct bugs across unrelated code paths (imports, tokenizers, tensor parallelism, tests, generation, loading, and model-specific failures). No soft pair looks like a true duplicate or mergeable PR-equivalent change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:38617|issue:43992", "issue:36296|issue:43575", "issue:42890|issue:44964", "issue:42890|issue:44112", "issue:43618|issue:44857", "issue:28282|issue:33453", "issue:43618|issue:44977", "issue:43502|issue:44062", "issue:43010|issue:44075", "issue:42890|issue:45341", "issue:43010|issue:43232"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6644, "estimated_input_tokens": 3194, "item_count": 17, "node_count": 17, "serialized_chars": 12775, "soft_pair_count": 10}, "cached_at": "2026-04-14T13:08:30Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "045cb2be5efb244edca02d506126ee1d6c7ff78f8d892aee7cb8ece298bfc012", 
"prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:30990", "issue:31795", "issue:42548", "issue:43010", "issue:43065", "issue:43502", "issue:43723", "issue:44222", "issue:44351", "issue:44355", "issue:44368", "issue:44387", "issue:44485", "issue:44556", "issue:44749", "issue:45125", "issue:45375"], "result": {"analyst_result": {"best_issue_reason": "44556 is the most specific and actionable report among the items, with a concrete upgrade/reload failure mode. It is the best issue to anchor triage, even though it does not truly subsume the others.", "best_pr_reason": null, "canonical_issue_reason": "Issue 44556 is the clearest concrete user-facing regression report about model loading after a version upgrade, so it is the best single representative of the group. That said, the overall set is too diverse for a strong canonical choice.", "canonical_pr_reason": null, "confidence": 0.23, "soft_edge_verdicts": [{"accept": false, "left": "issue:43723", "reason": "Both involve loading, but one is tokenizer loading in v5 and the other is reloading a checkpoint after upgrading versions; different code paths and failures.", "right": "issue:44556"}, {"accept": false, "left": "issue:42548", "reason": "Both are import errors, but they concern different exported symbols and likely different missing-export causes; not the same concrete bug.", "right": "issue:44351"}, {"accept": false, "left": "issue:44387", "reason": "Int4 quantization OOM from memory changes is unrelated to checkpoint reload failure after version upgrade.", "right": "issue:44556"}, {"accept": false, "left": "issue:44222", "reason": "Both mention MoE-related models, but one is FP8 save_pretrained behavior and the other is tensor-parallel metadata; different bugs.", "right": "issue:45125"}, {"accept": false, "left": "issue:43010", "reason": "A no_grad decorator issue on cache/layer updates is unrelated to a dummy Conv2d in Sam3PixelDecoder.", "right": "issue:43065"}, {"accept": false, "left": 
"issue:45125", "reason": "Both are Qwen3-related, but one is missing _tp_plan for tensor parallelism and the other is a missing vision config field; different subsystems.", "right": "issue:45375"}, {"accept": false, "left": "issue:44355", "reason": "Compiled Python file errors and data filtering slowdown after upgrade are unrelated symptoms and code paths.", "right": "issue:44749"}, {"accept": false, "left": "issue:30990", "reason": "Sentence Transformers loading hang and forward-argument documentation confusion are unrelated.", "right": "issue:31795"}, {"accept": false, "left": "issue:43502", "reason": "Local-files-only network requests and GLM-5 RoPE implementation are unrelated issues.", "right": "issue:44485"}, {"accept": false, "left": "issue:43010", "reason": "A cache/update no_grad bug and a tie_word_embeddings warning during LoRA fine-tuning are not the same underlying change.", "right": "issue:44368"}], "summary": "The cluster is very heterogeneous: it mixes unrelated import errors, loading regressions, quantization/OOM, configuration warnings, and documentation issues. 
None of the soft pairs look like the same underlying bug/change closely enough to merge as duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43723|issue:44556", "issue:42548|issue:44351", "issue:44387|issue:44556", "issue:44222|issue:45125", "issue:43010|issue:43065", "issue:45125|issue:45375", "issue:44355|issue:44749", "issue:30990|issue:31795", "issue:43502|issue:44485", "issue:43010|issue:44368"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6532, "estimated_input_tokens": 3138, "item_count": 17, "node_count": 17, "serialized_chars": 12551, "soft_pair_count": 10}, "cached_at": "2026-04-14T13:08:46Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "4e36ec9eb7cd995b35b83d3f337b65b0ef1a3d483d2808c6104ccf3b6cefc235", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:36296", "issue:38175", "issue:42831", "issue:42890", "issue:43232", "issue:43299", "issue:43502", "issue:43874", "issue:44075", "issue:44315", "issue:44355", "issue:44360", "issue:44373", "issue:44556", "issue:44655", "issue:44961", "issue:45071"], "result": {"analyst_result": {"best_issue_reason": "Issue 43502 is the strongest standalone canonical issue because it is precise, user-impacting, and narrowly defined compared with the other broader or more ambiguous reports.", "best_pr_reason": null, "canonical_issue_reason": "Issue 43502 is the clearest, well-scoped bug report with a concrete reproducible symptom and a specific behavioral contract violation (`local_files_only=True` still making requests).", "canonical_pr_reason": null, "confidence": 0.77, "soft_edge_verdicts": [{"accept": false, "left": 
"issue:44315", "reason": "Different bugs: one is Liger Kernel not being applied in `model_init`; the other is a DSA/ReLU implementation concern. No shared code-path evidence.", "right": "issue:44360"}, {"accept": false, "left": "issue:36296", "reason": "Tensor-parallel training and SGD optimizer arguments are unrelated problems affecting different code paths.", "right": "issue:44075"}, {"accept": false, "left": "issue:42890", "reason": "A flaky test due to missing `set_seed()` is unrelated to a docstring error for `position_ids`.", "right": "issue:44373"}, {"accept": false, "left": "issue:38175", "reason": "One is a model output/zero-probability issue; the other is unwanted network access with `local_files_only=True`. Different failures and fixes.", "right": "issue:43502"}, {"accept": false, "left": "issue:43502", "reason": "`local_files_only` network access has no clear relation to the unrelated 'racoon' issue.", "right": "issue:44961"}, {"accept": false, "left": "issue:43299", "reason": "Both involve loading, but one is a Qwen3VL MoE loading break in dev 5.0 and the other is checkpoint reload incompatibility across versions; not the same concrete bug.", "right": "issue:44556"}, {"accept": false, "left": "issue:44355", "reason": "Compiled Python file errors and pipeline `save_pretrained` failures are distinct behaviors with different code paths.", "right": "issue:44655"}, {"accept": false, "left": "issue:44355", "reason": "Compiled-file handling and `PretrainedConfig` type-checking regressions are separate issues.", "right": "issue:45071"}, {"accept": false, "left": "issue:42831", "reason": "FineGrainedFP8 accuracy degradation and a missing image-processor method causing token counting errors are different bugs.", "right": "issue:43874"}, {"accept": false, "left": "issue:42890", "reason": "A test seed flake and a generation kwarg/update bug after `sync_gpus` are unrelated.", "right": "issue:43232"}], "summary": "These items are a heterogeneous set of unrelated bug 
reports, mostly centered on Transformers/model-loading, but they do not describe the same underlying defect. All soft pairs are best treated as distinct issues."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44315|issue:44360", "issue:36296|issue:44075", "issue:42890|issue:44373", "issue:38175|issue:43502", "issue:43502|issue:44961", "issue:43299|issue:44556", "issue:44355|issue:44655", "issue:44355|issue:45071", "issue:42831|issue:43874", "issue:42890|issue:43232"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7064, "estimated_input_tokens": 3404, "item_count": 18, "node_count": 18, "serialized_chars": 13614, "soft_pair_count": 12}, "cached_at": "2026-04-14T13:09:04Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "5c55fd9cb6ac7d1af085ea212a4eb06fc7014b103dc96e17324b11915110aeb2", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:31795", "issue:33357", "issue:33453", "issue:34689", "issue:36296", "issue:38617", "issue:41628", "issue:42548", "issue:43576", "issue:43673", "issue:43824", "issue:43827", "issue:44222", "issue:44230", "issue:44351", "issue:44492", "issue:44961", "issue:45071"], "result": {"analyst_result": {"best_issue_reason": "No issue is a strong global canonical for this cluster because the reports are heterogeneous and do not share one concrete root cause.", "best_pr_reason": null, "canonical_issue_reason": "No single issue cleanly represents the cluster; the items span different symptoms, code paths, and problem classes, so collapsing them would be misleading.", "canonical_pr_reason": null, "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, 
"left": "issue:44222", "reason": "Both mention fp8/moe, but one is a save_pretrained bug and the other is an inference support request; different actions and code paths.", "right": "issue:44230"}, {"accept": false, "left": "issue:33453", "reason": "Tokenizer loading regression vs. model loading failure for Llama 3.2 Vision; similar release timing, but not the same bug.", "right": "issue:34689"}, {"accept": false, "left": "issue:33453", "reason": "Tokenizer loading vs tensor-parallel training bug are different failure modes in different subsystems.", "right": "issue:36296"}, {"accept": false, "left": "issue:43673", "reason": "Cache missing during chunked_prefill is a runtime regression; typo in cache strategies is a documentation/naming issue.", "right": "issue:44492"}, {"accept": false, "left": "issue:43827", "reason": "Docs still referencing pipeline() is unrelated to the placeholder 'racoon' issue.", "right": "issue:44961"}, {"accept": false, "left": "issue:43576", "reason": "Broken v5 env command and missing Qwen2.5-VL import are distinct import/CLI problems.", "right": "issue:43824"}, {"accept": false, "left": "issue:43576", "reason": "env command breakage is not the same as PretrainedConfig type checking regression.", "right": "issue:45071"}, {"accept": false, "left": "issue:38617", "reason": "ImportError for layer_type_validation and the broken env command are different surfaces and likely different causes.", "right": "issue:43576"}, {"accept": false, "left": "issue:33357", "reason": "MacOS bus error with CLIP is unrelated to tokenizer loading regression.", "right": "issue:33453"}, {"accept": false, "left": "issue:41628", "reason": "Missing AutoImageProcessor and missing PreTrainedModel are separate import failures with different symbols.", "right": "issue:42548"}, {"accept": false, "left": "issue:31795", "reason": "Documentation confusion about forward args does not match an ImportError for layer_type_validation.", "right": "issue:38617"}, {"accept": false, 
"left": "issue:43576", "reason": "Broken env command and missing HybridCache import are different issues, even if both are v5-related.", "right": "issue:44351"}], "summary": "This cluster is a grab-bag of unrelated issues: docs confusion, import errors, tokenizer/model loading regressions, cache/type-checking bugs, and one unrelated Mac bus error. None of the soft pairs look like the same underlying defect or change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44222|issue:44230", "issue:33453|issue:34689", "issue:33453|issue:36296", "issue:43673|issue:44492", "issue:43827|issue:44961", "issue:43576|issue:43824", "issue:43576|issue:45071", "issue:38617|issue:43576", "issue:33357|issue:33453", "issue:41628|issue:42548", "issue:31795|issue:38617", "issue:43576|issue:44351"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7056, "estimated_input_tokens": 3400, "item_count": 18, "node_count": 18, "serialized_chars": 13597, "soft_pair_count": 12}, "cached_at": "2026-04-14T13:09:25Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "f17cc0b0a5568c2d4b5c79b87216e5c4710953f057e0cee6fb190bcc6188c06e", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:36296", "issue:41628", "issue:43012", "issue:43526", "issue:43576", "issue:43901", "issue:44261", "issue:44273", "issue:44355", "issue:44360", "issue:44485", "issue:44556", "issue:44623", "issue:44908", "issue:44938", "issue:44961", "issue:45003", "issue:45230"], "result": {"analyst_result": {"best_issue_reason": "Issue 45003 is the strongest standalone bug report in the set because it is specific, open, and has the most follow-on 
discussion/inbound references. It still does not represent the rest of the items as duplicates.", "best_pr_reason": null, "canonical_issue_reason": "No true canonical issue: these are not duplicates of one underlying bug. If a placeholder representative is needed, issue 45003 is the most concrete and actively discussed standalone report.", "canonical_pr_reason": null, "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "issue:44908", "reason": "Different problems: scheduler kwargs handling vs an unrelated issue with no shared code-path evidence.", "right": "issue:44961"}, {"accept": false, "left": "issue:43576", "reason": "Both mention loading/CLI behavior, but one is a broken env command and the other is a Python 3.14 import/load failure; not the same bug.", "right": "issue:44938"}, {"accept": false, "left": "issue:44485", "reason": "GLM-5 RoPE implementation is model-specific; \"racoon\" provides no matching concrete bug or code-path.", "right": "issue:44961"}, {"accept": false, "left": "issue:44273", "reason": "Both involve loading, but one is lazy-loading behavior and the other is unsafe sys.modules access; too different to merge.", "right": "issue:45003"}, {"accept": false, "left": "issue:36296", "reason": "Tensor parallel training and TextClassificationPipeline docs are unrelated issues.", "right": "issue:43901"}, {"accept": false, "left": "issue:41628", "reason": "Importing AutoImageProcessor is a symbol/export issue; compiled Python file failures are a separate runtime problem.", "right": "issue:44355"}, {"accept": false, "left": "issue:36296", "reason": "Tensor-parallel training bug and bfloat16 warning are unrelated training/runtime behaviors.", "right": "issue:43012"}, {"accept": false, "left": "issue:36296", "reason": "Tensor parallel training has no evident overlap with BeitImageProcessorFast reduce_labels behavior.", "right": "issue:43526"}, {"accept": false, "left": "issue:44261", "reason": "MLA q_a_layernorm config precision issue 
is unrelated to the \"racoon\" report.", "right": "issue:44961"}, {"accept": false, "left": "issue:44261", "reason": "Missing rms_norm_eps in MLA and a DSA indexer ReLU concern are different code paths and symptoms.", "right": "issue:44360"}, {"accept": false, "left": "issue:43576", "reason": "A broken env command and checkpoint reload compatibility after version upgrade are distinct issues.", "right": "issue:44556"}, {"accept": false, "left": "issue:44623", "reason": "processor.save_pretrained missing files is a specific serialization bug; \"Bug report\" is too vague to establish the same underlying problem.", "right": "issue:45230"}], "summary": "This cluster is highly heterogeneous: the issues span unrelated bugs in scheduling, loading/imports, CLI, docs, model configs, and version compatibility. There is no convincing duplicate set here, so all soft edges should be rejected."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44908|issue:44961", "issue:43576|issue:44938", "issue:44485|issue:44961", "issue:44273|issue:45003", "issue:36296|issue:43901", "issue:41628|issue:44355", "issue:36296|issue:43012", "issue:36296|issue:43526", "issue:44261|issue:44961", "issue:44261|issue:44360", "issue:43576|issue:44556", "issue:44623|issue:45230"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6404, "estimated_input_tokens": 3074, "item_count": 17, "node_count": 17, "serialized_chars": 12296, "soft_pair_count": 9}, "cached_at": "2026-04-14T13:09:50Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "baedbd39477d26a7afb8142cab40581a1ac0e3aee9e3580ccd6d83e8f47fe058", 
"prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:41669", "issue:42371", "issue:42890", "issue:43673", "issue:43937", "issue:44038", "issue:44246", "issue:44273", "issue:44360", "issue:44393", "issue:44492", "issue:44556", "issue:44704", "issue:44908", "issue:45071", "issue:45092", "issue:45125"], "result": {"analyst_result": {"best_issue_reason": "issue:44273 is the best representative issue in the cluster because it names the core behavior problem rather than just the performance symptom.", "best_pr_reason": null, "canonical_issue_reason": "issue:44273 is the more direct statement of the underlying bug (lazy loading broken), while issue:44246 reads like a user-visible symptom of slow import. It is the better canonical issue if these are deduped.", "canonical_pr_reason": null, "confidence": 0.78, "soft_edge_verdicts": [{"accept": false, "left": "issue:41669", "reason": "Import-* cleanup vs generation cache regression are unrelated code paths and problems.", "right": "issue:43673"}, {"accept": false, "left": "issue:44393", "reason": "Multimodal 2D box output bug is unrelated to PretrainedConfig type-checking regressions.", "right": "issue:45071"}, {"accept": false, "left": "issue:44038", "reason": "Qwen3-VL-Moe bug and missing _tp_plan for tensor parallelism are different model issues.", "right": "issue:45125"}, {"accept": false, "left": "issue:43937", "reason": "GenerationConfig validation and a DSA indexer ReLU discussion are unrelated.", "right": "issue:44360"}, {"accept": false, "left": "issue:44556", "reason": "Both concern checkpoint/loading compatibility, but they target different scenarios and likely different fixes.", "right": "issue:45092"}, {"accept": false, "left": "issue:42371", "reason": "TF32 settings guidance and a cache-strategy typo are not the same underlying issue.", "right": "issue:44492"}, {"accept": false, "left": "issue:42890", "reason": "Test seeding flakiness and inverse_sqrt scheduler kwargs handling are unrelated.", 
"right": "issue:44908"}, {"accept": true, "left": "issue:44246", "reason": "Both point to the same import/lazy-loading path: slow import behavior and lazy loading not functioning properly are likely the same underlying bug.", "right": "issue:44273"}, {"accept": false, "left": "issue:44704", "reason": "AutoProcessor cached_file kwargs forwarding and scheduler kwargs propagation are separate code paths.", "right": "issue:44908"}], "summary": "This cluster is mostly a loose topical mix rather than true duplicates. The only plausible duplicate pair is the import/lazy-loading reports: \"import transformers takes long sometimes\" and \"Lazy loading is not working properly\"."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:41669|issue:43673", "issue:44393|issue:45071", "issue:44038|issue:45125", "issue:43937|issue:44360", "issue:44556|issue:45092", "issue:42371|issue:44492", "issue:42890|issue:44908", "issue:44246|issue:44273", "issue:44704|issue:44908"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7312, "estimated_input_tokens": 3528, "item_count": 18, "node_count": 18, "serialized_chars": 14111, "soft_pair_count": 13}, "cached_at": "2026-04-14T13:10:17Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "f2e6ba99412b806ad83a00b680fd93259f5fe5c4e413ce7e2f9d394592105fab", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:41669", "issue:43352", "issue:43408", "issue:43576", "issue:43827", "issue:43976", "issue:44295", "issue:44297", "issue:44315", "issue:44351", "issue:44393", "issue:44492", "issue:44829", "issue:44908", "issue:44945", "issue:45071", "issue:45230", "issue:45341"], 
"result": {"analyst_result": {"best_issue_reason": "#45071 has the clearest failure mode, the strongest inbound activity, and the most actionable bug report among these unrelated issues.", "best_pr_reason": null, "canonical_issue_reason": "No true duplicate center is evident; #45071 is the most concrete and widely referenced regression, so it is the best anchor only by prominence.", "canonical_pr_reason": null, "confidence": 0.94, "soft_edge_verdicts": [{"accept": false, "left": "issue:43408", "reason": "Different bugs: a model-type mismatch warning versus Liger Kernel not being applied in model_init.", "right": "issue:44315"}, {"accept": false, "left": "issue:43576", "reason": "Unrelated surfaces: broken `transformers env` command versus scheduler kwargs being ignored.", "right": "issue:44908"}, {"accept": false, "left": "issue:43976", "reason": "Different problem domains: Python version compatibility versus Qwen3-VL 2D bbox output errors.", "right": "issue:44393"}, {"accept": false, "left": "issue:43352", "reason": "Flash Attention 2.0 support for Nemotron is unrelated to Qwen3-VL bounding-box hallucinations.", "right": "issue:44393"}, {"accept": false, "left": "issue:44908", "reason": "A scheduler-parameter bug and a `PretrainedConfig` type-checking regression are separate code paths.", "right": "issue:45071"}, {"accept": false, "left": "issue:41669", "reason": "Import-time performance regression from `import *` is different from missing `HybridCache` export/import failure.", "right": "issue:44351"}, {"accept": false, "left": "issue:44315", "reason": "Both involve training behavior, but Liger Kernel application and flash_attention_3 degeneration are not the same bug.", "right": "issue:44829"}, {"accept": false, "left": "issue:44297", "reason": "`tokenizer.save_pretrained` metadata mismatch is not identifiable from a generic 'Bug report' title.", "right": "issue:45230"}, {"accept": false, "left": "issue:41669", "reason": "A large import slowdown is unrelated to 
an unspecified testing_utils bug.", "right": "issue:45341"}, {"accept": false, "left": "issue:41669", "reason": "Import-performance cleanup and a cache-strategy typo are distinct issues.", "right": "issue:44492"}, {"accept": false, "left": "issue:44492", "reason": "A typo in cache strategy docs/code is not the same as incorrect pipeline-parallel LLM outputs.", "right": "issue:44945"}, {"accept": false, "left": "issue:44295", "reason": "The buffer read error is concrete, while the other issue is too generic to establish identity.", "right": "issue:45230"}, {"accept": false, "left": "issue:43827", "reason": "Documentation still mentioning `pipeline()` is unrelated to the inverse_sqrt scheduler ignoring kwargs.", "right": "issue:44908"}], "summary": "The set is heterogeneous: the soft-similarity pairs are mostly false positives across different subsystems, symptoms, and code paths. No PRs are present. If an issue must serve as an anchor, #45071 is the strongest standalone regression, but it does not represent the rest of the set."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43408|issue:44315", "issue:43576|issue:44908", "issue:43976|issue:44393", "issue:43352|issue:44393", "issue:44908|issue:45071", "issue:41669|issue:44351", "issue:44315|issue:44829", "issue:44297|issue:45230", "issue:41669|issue:45341", "issue:41669|issue:44492", "issue:44492|issue:44945", "issue:44295|issue:45230", "issue:43827|issue:44908"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7082, "estimated_input_tokens": 3413, "item_count": 18, "node_count": 18, "serialized_chars": 13649, "soft_pair_count": 12}, "cached_at": "2026-04-14T13:10:38Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": 
"0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "c0ce2a8246873d502496cded7a3f547423ec640548abab721be4dd6a235e5644", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:31795", "issue:33453", "issue:41669", "issue:43519", "issue:43576", "issue:43704", "issue:43723", "issue:43937", "issue:44077", "issue:44273", "issue:44393", "issue:44485", "issue:44655", "issue:44861", "issue:44908", "issue:45125", "issue:45230", "issue:45310"], "result": {"analyst_result": {"best_issue_reason": "issue:45125 is the strongest standalone issue because it identifies a specific code-path failure and likely actionable fix surface.", "best_pr_reason": null, "canonical_issue_reason": "issue:45125 is the most concrete and implementation-specific report in the set: it names a precise missing _tp_plan tensor-parallelism path, unlike broader or vaguer bug titles.", "canonical_pr_reason": null, "confidence": 0.78, "soft_edge_verdicts": [{"accept": false, "left": "issue:44077", "reason": "Different bugs: one is about an optional post_init constraint in PatchTSMixer, the other is an AttributeError in tied-weight key handling.", "right": "issue:44861"}, {"accept": false, "left": "issue:41669", "reason": "Unrelated subsystems: import-star model loading performance vs VRAM leakage in Qwen3ForCausalLM with dataloader threads.", "right": "issue:43704"}, {"accept": false, "left": "issue:31795", "reason": "Documentation confusion in model.forward is not the same as a tokenizer loading regression.", "right": "issue:33453"}, {"accept": false, "left": "issue:43723", "reason": "Tokenizer loading in v5 and Qwen3-VL 2D bounding box output are distinct problems with different code paths.", "right": "issue:44393"}, {"accept": false, "left": "issue:44655", "reason": "Saving Pipeline objects is unrelated to _get_tied_weight_keys crashing on list-valued _tied_weights_keys.", "right": "issue:44861"}, {"accept": 
false, "left": "issue:43576", "reason": "A broken CLI command and lazy-loading behavior are not the same underlying bug.", "right": "issue:44273"}, {"accept": false, "left": "issue:43937", "reason": "GLM-5 GenerationConfig validation and Qwen3-VL bounding-box errors affect different models and logic.", "right": "issue:44393"}, {"accept": false, "left": "issue:45125", "reason": "Both concern Qwen3.5 MoE loading, but one is specifically missing _tp_plan for tensor parallelism while the other is a broader from_pretrained error; not enough evidence they are the same code-path.", "right": "issue:45310"}, {"accept": false, "left": "issue:44273", "reason": "Lazy loading failure and inverse_sqrt scheduler kwargs handling are unrelated.", "right": "issue:44908"}, {"accept": false, "left": "issue:45230", "reason": "issue:45230 is too generic to match the specific Qwen3.5 MoE from_pretrained failure.", "right": "issue:45310"}, {"accept": false, "left": "issue:43519", "reason": "Timestamp calculation in Qwen3VL Processor is unrelated to GLM-5 RoPE implementation.", "right": "issue:44485"}, {"accept": false, "left": "issue:43519", "reason": "Timestamp math in a vision processor and scheduler keyword propagation are different bugs.", "right": "issue:44908"}], "summary": "These are mostly unrelated issues that only look similar at a high level. 
I would not merge any of the soft-edge pairs; the cluster is heterogeneous, so the best representative issue is one of the concrete Qwen3.5 MoE loading reports."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44077|issue:44861", "issue:41669|issue:43704", "issue:31795|issue:33453", "issue:43723|issue:44393", "issue:44655|issue:44861", "issue:43576|issue:44273", "issue:43937|issue:44393", "issue:45125|issue:45310", "issue:44273|issue:44908", "issue:45230|issue:45310", "issue:43519|issue:44485", "issue:43519|issue:44908"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7134, "estimated_input_tokens": 3439, "item_count": 18, "node_count": 18, "serialized_chars": 13756, "soft_pair_count": 12}, "cached_at": "2026-04-14T13:10:56Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "539c5a848167446a57f484fc65bfda2dac7446ddc932b709f2c8076b24884cf9", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:31795", "issue:34689", "issue:37428", "issue:42371", "issue:42890", "issue:42913", "issue:43066", "issue:43502", "issue:43519", "issue:43704", "issue:44273", "issue:44315", "issue:44393", "issue:44485", "issue:44623", "issue:44908", "issue:44998", "issue:45071"], "result": {"analyst_result": {"best_issue_reason": "Issue 45071 is the best overall issue to keep as the representative item because it is specific, high-impact, and clearly framed as a regression rather than a vague feature request.", "best_pr_reason": null, "canonical_issue_reason": "Issue 45071 is the strongest standalone candidate: it describes a concrete versioned regression, appears broadly impactful, and has the most obvious 
cross-references among the listed issues.", "canonical_pr_reason": null, "confidence": 0.41, "soft_edge_verdicts": [{"accept": false, "left": "issue:31795", "reason": "Different problems: documentation wording in model.forward vs a model-loading breakage for a specific Llama vision model.", "right": "issue:34689"}, {"accept": false, "left": "issue:44315", "reason": "Different code paths and symptoms: model_init/Liger Kernel application vs Qwen3-VL 2D bounding box output errors.", "right": "issue:44393"}, {"accept": false, "left": "issue:31795", "reason": "Unrelated: forward-API documentation confusion does not match a flash-attention import error.", "right": "issue:37428"}, {"accept": false, "left": "issue:44273", "reason": "Different scope: lazy loading behavior vs a PretrainedConfig type-checking regression.", "right": "issue:45071"}, {"accept": false, "left": "issue:43704", "reason": "Different models and bugs: VRAM leak in Qwen3ForCausalLM vs GLM-5 RoPE implementation discussion.", "right": "issue:44485"}, {"accept": false, "left": "issue:42371", "reason": "No shared underlying bug: TF32 configuration guidance vs a multi-thread VRAM leak.", "right": "issue:43704"}, {"accept": false, "left": "issue:43502", "reason": "Different failures: unexpected network access with local_files_only vs incorrect timestamp calculation in a processor.", "right": "issue:43519"}, {"accept": false, "left": "issue:43519", "reason": "Both involve processors, but one is timestamp math and the other is save_pretrained file output; not the same bug.", "right": "issue:44623"}, {"accept": false, "left": "issue:43519", "reason": "Unrelated issues affecting different components and failure modes.", "right": "issue:43704"}, {"accept": false, "left": "issue:42913", "reason": "Closest pair, but still not enough to treat as the same bug: one is a broad v4-to-v5 tokenizer behavior change, the other is a specific decoder-type mismatch.", "right": "issue:43066"}, {"accept": false, "left": 
"issue:42890", "reason": "Different concerns: flaky test seeding vs runtime VRAM leakage.", "right": "issue:43704"}, {"accept": false, "left": "issue:44908", "reason": "Clearly unrelated: scheduler keyword handling vs an off-topic issue.", "right": "issue:44998"}], "summary": "The set is mostly unrelated issues. None of the soft pairs are strong duplicates; the tokenizer pair is the closest but still too broad to merge confidently from titles alone. No PRs are present."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:31795|issue:34689", "issue:44315|issue:44393", "issue:31795|issue:37428", "issue:44273|issue:45071", "issue:43704|issue:44485", "issue:42371|issue:43704", "issue:43502|issue:43519", "issue:43519|issue:44623", "issue:43519|issue:43704", "issue:42913|issue:43066", "issue:42890|issue:43704", "issue:44908|issue:44998"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7388, "estimated_input_tokens": 3566, "item_count": 18, "node_count": 18, "serialized_chars": 14264, "soft_pair_count": 14}, "cached_at": "2026-04-14T13:11:18Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "b1135c42123138d90fecc74b5f901a78dcccc2b6311276a7311b88910f98668b", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:31515", "issue:31795", "issue:33453", "issue:36296", "issue:36683", "issue:38617", "issue:43525", "issue:43572", "issue:43618", "issue:44230", "issue:44492", "issue:44556", "issue:44623", "issue:44861", "issue:44998", "issue:45290", "issue:45310", "issue:45341"], "result": {"analyst_result": {"best_issue_reason": "Issue 44556 is the most representative user-facing bug among this set, 
with a specific upgrade/reload failure mode rather than a vague or unrelated symptom.", "best_pr_reason": null, "canonical_issue_reason": "Issue 44556 is the clearest concrete loading-regression report and best matches the broad checkpoint/reload theme; the others are mostly distinct config, tokenizer, docs, or unrelated bugs.", "canonical_pr_reason": null, "confidence": 0.81, "soft_edge_verdicts": [{"accept": false, "left": "issue:44861", "reason": "Different bugs: one is a tied-weights AttributeError, the other is a chat template crash on tool-call assistant messages.", "right": "issue:45290"}, {"accept": false, "left": "issue:44998", "reason": "No clear shared underlying bug; the titles are unrelated and there is no evidence they target the same code path.", "right": "issue:45341"}, {"accept": false, "left": "issue:44492", "reason": "A cache-strategy typo is not the same as the unrelated issue 44998.", "right": "issue:44998"}, {"accept": false, "left": "issue:43618", "reason": "CLIP attentions assignment regression is unrelated to issue 44998.", "right": "issue:44998"}, {"accept": false, "left": "issue:44556", "reason": "44556 is a checkpoint reload/version-upgrade failure; issue 44998 does not look like the same bug.", "right": "issue:44998"}, {"accept": false, "left": "issue:43525", "reason": "Both are AttributeError-type reports, but they concern different missing config/weight attributes and different code paths.", "right": "issue:44861"}, {"accept": false, "left": "issue:43572", "reason": "Different attribute/migration problems: StableLm pad_token_idx versus tied-weights handling.", "right": "issue:44861"}, {"accept": false, "left": "issue:31515", "reason": "Slow from_pretrained checkpoint loading and an ImportError from a removed symbol are unrelated.", "right": "issue:38617"}, {"accept": false, "left": "issue:44230", "reason": "FP8 inference support and missing processor.save_pretrained files are different features/bugs.", "right": "issue:44623"}, 
{"accept": false, "left": "issue:31515", "reason": "Model checkpoint loading slowdown is not the same as a tensor-parallel training bug.", "right": "issue:36296"}, {"accept": false, "left": "issue:31515", "reason": "Checkpoint loading performance and forward() documentation confusion are unrelated.", "right": "issue:31795"}, {"accept": false, "left": "issue:31515", "reason": "Both mention loading, but one is slow checkpoint loading and the other is a tokenizer loading regression; not the same concrete bug.", "right": "issue:33453"}, {"accept": false, "left": "issue:44230", "reason": "Both are fp8/Qwen-related at a high level, but they target different failures and code paths.", "right": "issue:45310"}, {"accept": false, "left": "issue:31515", "reason": "Slow checkpoint loading and Gemma3Config missing vocab_size are different bugs with different symptoms and fixes.", "right": "issue:36683"}], "summary": "This cluster is noisy: several items mention loading/from_pretrained/config regressions, but the soft pairs do not appear to be the same underlying bug or change. 
No soft edges should be merged."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44861|issue:45290", "issue:44998|issue:45341", "issue:44492|issue:44998", "issue:43618|issue:44998", "issue:44556|issue:44998", "issue:43525|issue:44861", "issue:43572|issue:44861", "issue:31515|issue:38617", "issue:44230|issue:44623", "issue:31515|issue:36296", "issue:31515|issue:31795", "issue:31515|issue:33453", "issue:44230|issue:45310", "issue:31515|issue:36683"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 990, "estimated_input_tokens": 367, "item_count": 2, "node_count": 2, "serialized_chars": 1465, "soft_pair_count": 1}, "cached_at": "2026-04-14T13:11:26Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "ba47190d12e1e2d309fa1af1be57e830dd0b153c890b747f8415d3e0445e5270", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:43298", "issue:44230"], "result": {"analyst_result": {"best_issue_reason": "#43298 is the best issue in the set because it states a focused runtime limitation with a clear user-facing failure mode. 
#44230 is a separate model-specific FP8 support request and is not a duplicate.", "best_pr_reason": null, "canonical_issue_reason": "Issue #43298 is the more concrete bug report and is better suited as the representative issue for this small cluster; it describes a specific missing capability rather than a broad model-support request.", "canonical_pr_reason": null, "confidence": 0.98, "soft_edge_verdicts": [{"accept": false, "left": "issue:43298", "reason": "Different problems in different model/runtime areas: audio continuous batching support vs FP8 inference support for Qwen3-VL/Qwen3.5 MoE. No shared code path or same underlying bug is evident.", "right": "issue:44230"}], "summary": "The two issues are about different topics: audio continuous batching support versus FP8 inference support for Qwen vision/MoE models. They do not look like duplicates or the same underlying bug/change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43298|issue:44230"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2484, "estimated_input_tokens": 1114, "item_count": 4, "node_count": 4, "serialized_chars": 4454, "soft_pair_count": 6}, "cached_at": "2026-04-14T13:11:45Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "d000a7917b228d6163914de32848882e6421c18b8931372a608ccfd5296aac59", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:27088", "issue:43834", "issue:44365", "issue:44370"], "result": {"analyst_result": {"best_issue_reason": "issue:27088 is the most useful representative because it names an actual language and appears to be a real, specific request rather than a template stub.", 
"best_pr_reason": null, "canonical_issue_reason": "issue:27088 is the only concrete, language-specific issue; the others are generic placeholder/template variants and do not represent the same actionable request.", "canonical_pr_reason": null, "confidence": 0.83, "soft_edge_verdicts": [{"accept": false, "left": "issue:27088", "reason": "Different specificity: Turkish docs translation vs a generic placeholder language template; not the same underlying issue.", "right": "issue:44370"}, {"accept": false, "left": "issue:27088", "reason": "Different specificity: Turkish docs translation vs a generic placeholder language template; not the same underlying issue.", "right": "issue:44365"}, {"accept": false, "left": "issue:27088", "reason": "Different specificity: Turkish docs translation vs a generic placeholder language template; not the same underlying issue.", "right": "issue:43834"}, {"accept": false, "left": "issue:43834", "reason": "They are very similar templates, but the records appear to be separate placeholder-style issues rather than one concrete duplicate request.", "right": "issue:44370"}, {"accept": false, "left": "issue:43834", "reason": "They are very similar templates, but the records appear to be separate placeholder-style issues rather than one concrete duplicate request.", "right": "issue:44365"}, {"accept": false, "left": "issue:44365", "reason": "They are very similar templates, but the records appear to be separate placeholder-style issues rather than one concrete duplicate request.", "right": "issue:44370"}], "summary": "These are not clean duplicates: 27088 is a concrete Turkish docs translation request, while 43834/44365/44370 look like placeholder/template internationalization issues. 
They share wording, but not the same specific request."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:27088|issue:44370", "issue:27088|issue:44365", "issue:27088|issue:43834", "issue:43834|issue:44370", "issue:43834|issue:44365", "issue:44365|issue:44370"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7320, "estimated_input_tokens": 3532, "item_count": 13, "node_count": 13, "serialized_chars": 14125, "soft_pair_count": 24}, "cached_at": "2026-04-14T13:12:08Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "f502bcd8f573d568eef42e7050ae4407a9fef96a2e39b4a468f76b0e208c6ce6", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:36817", "issue:37015", "issue:41211", "issue:42503", "issue:42738", "issue:43671", "issue:43837", "issue:43909", "issue:44121", "issue:44205", "issue:45027", "issue:45177", "issue:45322"], "result": {"analyst_result": {"best_issue_reason": "#42738 is the most generic/high-level request in the set (\u201cBERT-like models with RoPE\u201d), so it is the closest thing to an umbrella issue. Still, it should be treated only as a representative, not as equivalent to the other model-specific requests.", "best_pr_reason": null, "canonical_issue_reason": "No single issue cleanly represents the whole set because the requests target different models and even different modalities. 
If one must be chosen, issue #42738 is the broadest umbrella-style request, but it is not a true duplicate of the others.", "canonical_pr_reason": null, "confidence": 0.92, "soft_edge_verdicts": [{"accept": false, "left": "issue:43909", "reason": "Different concrete models and tasks: LFM2.5 Audio 1.5B vs SAM3-LiteText. Same broad request style, but not the same change.", "right": "issue:44205"}, {"accept": false, "left": "issue:43671", "reason": "Both are Qwen3 requests, but one is TTS and the other ASR; separate support paths and likely separate code changes.", "right": "issue:43837"}, {"accept": false, "left": "issue:44121", "reason": "Unrelated model families and purposes: weight-sparse transformer vs EUPE perception encoder.", "right": "issue:45322"}, {"accept": false, "left": "issue:43837", "reason": "ASR support vs audio model support; different model targets and implementation needs.", "right": "issue:43909"}, {"accept": false, "left": "issue:43837", "reason": "Qwen3-ASR and EUPE are unrelated model requests with no shared underlying bug/change.", "right": "issue:45322"}, {"accept": false, "left": "issue:37015", "reason": "NeoBERT is a text encoder-style model request; LFM2.5 Audio is an audio model request.", "right": "issue:43909"}, {"accept": false, "left": "issue:43671", "reason": "TTS support request and DeepSeek-OCR2 are different modalities and different model integrations.", "right": "issue:45177"}, {"accept": false, "left": "issue:37015", "reason": "NeoBERT and Qwen3-ASR are unrelated concrete model support requests.", "right": "issue:43837"}, {"accept": false, "left": "issue:43671", "reason": "Qwen3-TTS and Voxtral-4B-TTS are both speech-related, but they are distinct models and not the same underlying request.", "right": "issue:45027"}, {"accept": false, "left": "issue:37015", "reason": "NeoBERT vs OpenAI weight-sparse transformer are distinct model additions with different architectures.", "right": "issue:44121"}, {"accept": false, "left": 
"issue:43837", "reason": "ASR support and a sparse transformer request do not describe the same code-path change.", "right": "issue:44121"}, {"accept": false, "left": "issue:43837", "reason": "Audio ASR support vs OCR model support are different tasks and different integrations.", "right": "issue:45177"}, {"accept": false, "left": "issue:43671", "reason": "TTS request and LFM2.5 Audio request overlap only broadly in audio, not in concrete model/change.", "right": "issue:43909"}, {"accept": false, "left": "issue:43671", "reason": "Speech generation support vs weight-sparse transformer support are unrelated changes.", "right": "issue:44121"}, {"accept": false, "left": "issue:41211", "reason": "DEIMv2 and Qwen3-TTS are different model requests with no shared underlying bug/change.", "right": "issue:43671"}, {"accept": false, "left": "issue:36817", "reason": "EuroBert config support and DEIMv2 support are separate model additions.", "right": "issue:41211"}, {"accept": false, "left": "issue:37015", "reason": "NeoBERT and ModernVBERT are different model families; similar naming, but not the same concrete request.", "right": "issue:42503"}, {"accept": false, "left": "issue:36817", "reason": "EuroBert config support is unrelated to Qwen3-TTS speech support.", "right": "issue:43671"}, {"accept": false, "left": "issue:37015", "reason": "NeoBERT and Voxtral-4B-TTS are different model support requests in different domains.", "right": "issue:45027"}, {"accept": false, "left": "issue:42738", "reason": "Generic BERT-like RoPE request is not the same as a specific TTS model integration.", "right": "issue:45027"}, {"accept": false, "left": "issue:42503", "reason": "ModernVBERT models and Qwen3-ASR are different model targets and likely different code paths.", "right": "issue:43837"}, {"accept": false, "left": "issue:42503", "reason": "ModernVBERT and generic BERT-like RoPE requests are related in theme, but not duplicates of the same change.", "right": "issue:42738"}, {"accept": 
false, "left": "issue:42503", "reason": "ModernVBERT models are unrelated to Qwen3-TTS speech support.", "right": "issue:43671"}], "summary": "These are mostly separate model-support requests, not one duplicate bug/change cluster. They share a generic \u201cadd model X\u201d pattern, but the concrete models and modalities differ (BERT-like text models, audio TTS/ASR, OCR, perception encoders, etc.)."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43909|issue:44205", "issue:43671|issue:43837", "issue:44121|issue:45322", "issue:43837|issue:43909", "issue:43837|issue:45322", "issue:37015|issue:43909", "issue:43671|issue:45177", "issue:37015|issue:43837", "issue:43671|issue:45027", "issue:37015|issue:44121", "issue:43837|issue:44121", "issue:43837|issue:45177", "issue:43671|issue:43909", "issue:43671|issue:44121", "issue:41211|issue:43671", "issue:36817|issue:41211", "issue:37015|issue:42503", "issue:36817|issue:43671", "issue:37015|issue:45027", "issue:42738|issue:45027", "issue:43837|issue:45027", "issue:42503|issue:43837", "issue:42503|issue:42738", "issue:42503|issue:43671"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 974, "estimated_input_tokens": 359, "item_count": 2, "node_count": 2, "serialized_chars": 1434, "soft_pair_count": 1}, "cached_at": "2026-04-14T13:12:16Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "46e2e0697d6135e23b269850693def8fb0e5a2f967d1f48be0a1daf52733b41e", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:42503", "issue:45027"], "result": {"analyst_result": {"best_issue_reason": "Issue #45027 is the more actionable representative because it is 
an open, active request for a specific model support addition; #42503 is a separate, closed model request.", "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.98, "soft_edge_verdicts": [{"accept": false, "left": "issue:42503", "reason": "Not the same underlying bug or change: #42503 asks to add ModernVBERT models, while #45027 requests support for Voxtral-4B-TTS-2603. They target different model families and different capabilities.", "right": "issue:45027"}], "summary": "The two issues are about different model families and different feature requests, so this cluster is not a duplicate set."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:42503|issue:45027"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 3402, "estimated_input_tokens": 1573, "item_count": 6, "node_count": 6, "serialized_chars": 6290, "soft_pair_count": 8}, "cached_at": "2026-04-14T13:12:37Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "9b1ff09fe644369d448cc5fea07851f9e896ea7f0c95cfa1a478b6d2e92d2761", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:37934", "issue:42936", "issue:43240", "issue:43668", "issue:43813", "issue:44031"], "result": {"analyst_result": {"best_issue_reason": "#43668 is the best single issue to surface as a representative example because it is concise and specific, but it should not be merged with the others.", "best_pr_reason": null, "canonical_issue_reason": "No clear canonical duplicate exists. 
If one issue must represent the set, #43668 is the most self-contained and broadly readable, but it is still a distinct bug from the others.", "canonical_pr_reason": null, "confidence": 0.98, "soft_edge_verdicts": [{"accept": false, "left": "issue:37934", "reason": "Different problems: a question about Llama4TextL2Norm/RMSNorm behavior versus a ModernBERT config type-hint bug.", "right": "issue:43668"}, {"accept": false, "left": "issue:42936", "reason": "Different codepaths and symptoms: Mask2Former ignore_value handling versus kwargs not being forwarded to a loss function.", "right": "issue:43240"}, {"accept": false, "left": "issue:42936", "reason": "Unrelated: a model-specific ignore_value bug versus tokenizer regex warnings after a version change.", "right": "issue:44031"}, {"accept": false, "left": "issue:42936", "reason": "Unrelated: Mask2Former ignore_value usage versus a typo in a quantization operation name.", "right": "issue:43813"}, {"accept": false, "left": "issue:42936", "reason": "Different scope: model training-loss behavior versus a config annotation/type-hint correctness issue.", "right": "issue:43668"}, {"accept": false, "left": "issue:43240", "reason": "Different bugs: loss kwargs propagation versus an incorrect ModernBERTConfig type hint.", "right": "issue:43668"}, {"accept": false, "left": "issue:43668", "reason": "Different subsystems: config typing versus tokenizer regex warning behavior.", "right": "issue:44031"}, {"accept": false, "left": "issue:43668", "reason": "Different issues: a type-hint bug versus a typo in a quantization-related attribute name.", "right": "issue:43813"}], "summary": "These issues are only superficially similar; they cover unrelated bugs in different models/components (normalization semantics, loss kwargs propagation, tokenizer regex warnings, a quantization typo, and a config type-hint issue). 
I do not see any true duplicates in this cluster."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:37934|issue:43668", "issue:42936|issue:43240", "issue:42936|issue:44031", "issue:42936|issue:43813", "issue:42936|issue:43668", "issue:43240|issue:43668", "issue:43668|issue:44031", "issue:43668|issue:43813"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 996, "estimated_input_tokens": 370, "item_count": 2, "node_count": 2, "serialized_chars": 1480, "soft_pair_count": 1}, "cached_at": "2026-04-14T13:12:47Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "714e5b28daf294f3551333290fc2419b5c0f5f27d8c8d9c79b7bd959a92ec742", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:41084", "issue:43037"], "result": {"analyst_result": {"best_issue_reason": "If forced to pick a single representative, issue #43037 is more concrete and actionable; however, it is not a canonical duplicate of #41084.", "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "issue:41084", "reason": "Different problem domains and no shared technical target: 'Set Block Decoding' is a generic decoding configuration request, while 'DeepSeek v3.2 support' is a model-support feature request.", "right": "issue:43037"}], "summary": "The two issues are not duplicates: one is a broad request about block decoding, while the other is a specific feature request for DeepSeek v3.2 support. 
They do not describe the same underlying bug or change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:41084|issue:43037"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 1048, "estimated_input_tokens": 396, "item_count": 2, "node_count": 2, "serialized_chars": 1583, "soft_pair_count": 1}, "cached_at": "2026-04-14T13:13:02Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "a22d0128cf24809dc3f1d4d725b5b60ab51dcbec8d32a59160918b98f7ed342b", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:43598", "issue:43600"], "result": {"analyst_result": {"best_issue_reason": "If one must be chosen as the more central Trainer-behavior report, 43598 is slightly broader, but it does not subsume 43600.", "best_pr_reason": null, "canonical_issue_reason": "No clear canonical issue: 43598 is about `remove_unused_column` customizability, while 43600 is about FLOPS estimation for custom trainers.", "canonical_pr_reason": null, "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "issue:43598", "reason": "Different problems: trainer column-removal customizability vs FLOPS counting for custom trainers. Shared Trainer context is too broad to treat as the same issue.", "right": "issue:43600"}], "summary": "The two closed issues touch different Trainer concerns: one about `remove_unused_column` customizability, the other about FLOPS estimation for custom trainers. 
They are related only at a very broad Trainer level, not the same underlying bug/change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43598|issue:43600"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 1062, "estimated_input_tokens": 403, "item_count": 2, "node_count": 2, "serialized_chars": 1612, "soft_pair_count": 1}, "cached_at": "2026-04-14T13:13:09Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "3d38828a88e26a35985bf91a96afa10a29df840a1250438aa2f695cd4d60ca02", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:43684", "issue:44405"], "result": {"analyst_result": {"best_issue_reason": "Issue #43684 is the best overall issue in this cluster because it is the broader model-registration request; #44405 is a narrower, separate request for sequence-classification support on Qwen3.5.", "best_pr_reason": null, "canonical_issue_reason": "Issue #43684 is the broader and more central request: it asks for Qwen3-Omni registration in AutoModel and AutoModelForConditionalGeneration, making it the better representative of the cluster theme.", "canonical_pr_reason": null, "confidence": 0.94, "soft_edge_verdicts": [{"accept": false, "left": "issue:43684", "reason": "Reject: these are different underlying feature requests. #43684 is about Qwen3-Omni registration for AutoModel/AutoModelForConditionalGeneration, while #44405 is about AutoModelForSequenceClassification support for Qwen3.5/Qwen3_5Config. 
They touch the same general subsystem but not the same concrete change.", "right": "issue:44405"}], "summary": "These are related only at a high level: both request missing AutoModel support for Qwen variants, but they target different model families and different auto-head classes, so they are not duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43684|issue:44405"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2388, "estimated_input_tokens": 1066, "item_count": 4, "node_count": 4, "serialized_chars": 4264, "soft_pair_count": 6}, "cached_at": "2026-04-14T13:13:24Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "9a625b43020ce1cd52311608d2bd68316556226cd24a3d30b16e8b445243ce06", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:43835", "issue:43845", "issue:43846", "issue:43859"], "result": {"analyst_result": {"best_issue_reason": "Issue 43859 is marginally the most useful single issue because it has the only recorded discussion activity, though all four issues are too sparse to be genuinely informative.", "best_pr_reason": null, "canonical_issue_reason": "Issue 43845 is the earliest of the near-identical \"huggingface\" reports, so it is the cleanest anchor if one issue must represent the cluster.", "canonical_pr_reason": null, "confidence": 0.93, "soft_edge_verdicts": [{"accept": false, "left": "issue:43835", "reason": "Different titles and no shared concrete problem description; too little evidence to treat as the same issue.", "right": "issue:43845"}, {"accept": false, "left": "issue:43835", "reason": "Different titles and no substantive body details; the 
soft similarity is only superficial.", "right": "issue:43846"}, {"accept": false, "left": "issue:43835", "reason": "Different titles and no explicit bug/feature context tying them together.", "right": "issue:43859"}, {"accept": false, "left": "issue:43845", "reason": "Same generic title, but the bodies are too sparse to confirm they report the same underlying bug rather than repeated low-content submissions.", "right": "issue:43846"}, {"accept": false, "left": "issue:43845", "reason": "Titles are similar, but there is no concrete shared failure mode or target to justify merging.", "right": "issue:43859"}, {"accept": false, "left": "issue:43846", "reason": "Same as above: only superficial title overlap, with no actionable evidence of a single underlying issue.", "right": "issue:43859"}], "summary": "These are extremely low-information issues with generic or empty-ish titles and no concrete bug details. The three \"huggingface\" issues are near-duplicates by title, but there isn\u2019t enough substantive content to safely unify them as the same underlying report; \"Fett\" looks unrelated."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43835|issue:43845", "issue:43835|issue:43846", "issue:43835|issue:43859", "issue:43845|issue:43846", "issue:43845|issue:43859", "issue:43846|issue:43859"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 3152, "estimated_input_tokens": 1448, "item_count": 5, "node_count": 5, "serialized_chars": 5791, "soft_pair_count": 9}, "cached_at": "2026-04-14T13:13:38Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": 
"1499fc2dbc709f238c2e108cc179352eea57743da77ead55ad4529b3c900db02", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:44371", "issue:44418", "issue:44572", "issue:44573", "issue:44937"], "result": {"analyst_result": {"best_issue_reason": "Issue 44371 is the least specific and most cluster-representative placeholder; the others are either different junk text or a separate documentation-themed submission.", "best_pr_reason": null, "canonical_issue_reason": "Issue 44371 is the most generic representative of the spam-like items, but the cluster overall lacks a real technical topic.", "canonical_pr_reason": null, "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "issue:44371", "reason": "Both are spam placeholders, but there is no evidence they report the same underlying issue or change.", "right": "issue:44572"}, {"accept": false, "left": "issue:44371", "reason": "Identical low-information spam titles are not enough to treat them as the same issue.", "right": "issue:44573"}, {"accept": false, "left": "issue:44371", "reason": "One is spam-like while the other is a documentation suggestion; they are not the same problem.", "right": "issue:44418"}, {"accept": false, "left": "issue:44371", "reason": "Different junk text and no shared technical problem; reject as duplicate.", "right": "issue:44937"}, {"accept": false, "left": "issue:44418", "reason": "Documentation enhancement request does not match the spam placeholder issue.", "right": "issue:44572"}, {"accept": false, "left": "issue:44418", "reason": "No shared underlying bug or feature change; one is a doc request, the other is spam.", "right": "issue:44573"}, {"accept": false, "left": "issue:44572", "reason": "Same generic spam title, but no evidence of a single concrete issue or change.", "right": "issue:44573"}, {"accept": false, "left": "issue:44572", "reason": "Different spam/junk submissions with no common technical content.", "right": "issue:44937"}, {"accept": false, "left": 
"issue:44573", "reason": "Different junk titles and no evidence they target the same underlying problem.", "right": "issue:44937"}], "summary": "This cluster is dominated by low-signal spam/junk issues with no substantive shared bug report or feature request. The items do not describe the same underlying problem, so the soft links should not be merged."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44371|issue:44572", "issue:44371|issue:44573", "issue:44371|issue:44418", "issue:44371|issue:44937", "issue:44418|issue:44572", "issue:44418|issue:44573", "issue:44572|issue:44573", "issue:44572|issue:44937", "issue:44573|issue:44937"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 1080, "estimated_input_tokens": 412, "item_count": 2, "node_count": 2, "serialized_chars": 1646, "soft_pair_count": 1}, "cached_at": "2026-04-14T13:13:55Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "349c1fcacc27a238dc6ab7c667b3733b6b47663e884cd8f5fbed99f08adaa4e4", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:45145", "issue:45151"], "result": {"analyst_result": {"best_issue_reason": "#45145 is the best issue to keep as the cluster representative because it was created first and has slightly more reference activity.", "best_pr_reason": null, "canonical_issue_reason": "#45145 is the earlier issue and has stronger linkage signal (1 inbound reference vs. 
0), so it is the best canonical representative for this cluster.", "canonical_pr_reason": null, "confidence": 0.88, "soft_edge_verdicts": [{"accept": false, "left": "issue:45145", "reason": "The evidence is too thin to confidently merge them as duplicates: they have identical titles and similar lengths, but the packet only shows soft-similarity and does not demonstrate clearly overlapping content or the same underlying issue.", "right": "issue:45151"}], "summary": "The two closed issues look related and may be duplicates: they share the exact title and have similar body sizes, but the packet only provides soft-similarity evidence rather than clear content overlap. #45145 remains the best canonical issue because it was created first and has the only inbound reference."}, "error_kind": null, "error_message": null, "evaluator_result": {"accept": false, "feedback": "The clustering is plausible, but the rationale overstates the evidence. The packet only shows identical titles, similar body lengths, and a soft-similarity signal; it does not demonstrate that the issue contents are \"clearly overlapping\" or \"substantially similar.\" The canonical choice of #45145 is fine based on earlier creation and inbound references, but the soft-edge verdict should be phrased more conservatively given deterministic_accept=false."}, "evaluator_used": true, "retried": true}, "soft_pairs": ["issue:45145|issue:45151"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 3210, "estimated_input_tokens": 1477, "item_count": 2, "node_count": 2, "serialized_chars": 5908, "soft_pair_count": 1}, "cached_at": "2026-04-14T13:15:17Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": 
"5cf2c80cd0e91f502dbd87bd599f71b60e70ac692470993128570a72e03655fd", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:26617", "pull_request:29095"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 26617 is the cleaner representative because it is a focused CharacterBERT addition, whereas PR 29095 is a broader RWKV5 draft with many ancillary README/token file changes.", "canonical_issue_reason": null, "canonical_pr_reason": "No canonical PR: the changes target different models and different code paths; the shared auto/docs edits are incidental.", "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:26617", "reason": "Reject: these are different model integrations with only superficial overlap in shared registration/docs files, so they do not fix the same underlying change.", "right": "pull_request:29095"}], "summary": "The two PRs both touch shared Transformers registration/docs files, but they implement different model additions (CharacterBERT vs RWKV5) and are not duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:26617|pull_request:29095"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 20080, "estimated_input_tokens": 9912, "item_count": 18, "node_count": 18, "serialized_chars": 39645, "soft_pair_count": 12}, "cached_at": "2026-04-14T13:16:19Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "519d5df6b2adcf0a8e213d7a426bf913b7ef5654d2aeec5b82a2bd6db0e10dd2", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43242", "pull_request:43251", "pull_request:43254", 
"pull_request:43303", "pull_request:43524", "pull_request:43989", "pull_request:44080", "pull_request:44118", "pull_request:44192", "pull_request:44193", "pull_request:44341", "pull_request:44388", "pull_request:44400", "pull_request:44429", "pull_request:44470", "pull_request:44690", "pull_request:45132", "pull_request:45315"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 45315 is the best representative PR overall because it has the broadest scope and clearest end-state for the MoE router-softmax bug, with the same concrete fix applied consistently across multiple model implementations.", "canonical_issue_reason": null, "canonical_pr_reason": "There is no single canonical PR for the whole set because the items break into multiple unrelated subclusters. If one representative is needed, PR 45315 is the strongest overall representative of the largest subcluster: it applies the same MoE router-logit fix across the broadest set of models and reads like the most complete version of that change.", "confidence": 0.68, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:44470", "reason": "Same GLM-MOE-DSA scoring bug: both add the missing ReLU in the same forward path across the same files, so they are duplicates/same change.", "right": "pull_request:44690"}, {"accept": true, "left": "pull_request:44388", "reason": "Both patch the loading_report ANSI formatting issue for non-TTY output in the same function; the helper naming differs, but the bug and fix are the same.", "right": "pull_request:44429"}, {"accept": true, "left": "pull_request:45132", "reason": "Both address the same MoE router-logit misuse before top-k; 45315 is broader, but it is the same underlying bug and could plausibly be one combined PR.", "right": "pull_request:45315"}, {"accept": true, "left": "pull_request:43242", "reason": "Both fix fixed_cross_entropy failing to forward supported cross_entropy kwargs; they target the same code path and same behavior 
gap.", "right": "pull_request:43251"}, {"accept": true, "left": "pull_request:44080", "reason": "Both fix ModelOutput/dataclass fields not staying in sync when a previously-None field is later set; same underlying bug in the same class.", "right": "pull_request:44118"}, {"accept": true, "left": "pull_request:44341", "reason": "Same loading_report ANSI/non-TTY styling bug; the implementations differ, but they are clearly aimed at the same defect.", "right": "pull_request:44388"}, {"accept": true, "left": "pull_request:43989", "reason": "Both fix the AutoVideoProcessor lookup crash when torchvision-derived mappings are missing/None, so they address the same lookup-path failure.", "right": "pull_request:44193"}, {"accept": true, "left": "pull_request:44341", "reason": "Same loading_report formatting issue for non-TTY output; these are alternative implementations of the same fix.", "right": "pull_request:44429"}, {"accept": false, "left": "pull_request:43303", "reason": "Both are in DeepSpeed state-dict/weight-conversion code, but 43524 looks like a follow-up MoE-specific refinement rather than the same exact change set as 43303.", "right": "pull_request:43524"}, {"accept": true, "left": "pull_request:44118", "reason": "Both fix the same ModelOutput key-sync bug when assigning a previously-None dataclass field after initialization.", "right": "pull_request:44192"}, {"accept": true, "left": "pull_request:43251", "reason": "Both are the same fixed_cross_entropy kwargs-forwarding fix; 43254 is just a more explicit version of 43251.", "right": "pull_request:43254"}, {"accept": true, "left": "pull_request:44341", "reason": "Same loading_report ANSI/style handling bug for interactive vs non-interactive output; these are duplicate fixes.", "right": "pull_request:44400"}], "summary": "The set is not one coherent duplicate cluster; it contains several separate PR chains around distinct bugs. 
I accepted the pairs that clearly fix the same code-path problem, and rejected the DeepSpeed pair as a likely follow-up rather than the same exact change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44470|pull_request:44690", "pull_request:44388|pull_request:44429", "pull_request:45132|pull_request:45315", "pull_request:43242|pull_request:43251", "pull_request:44080|pull_request:44118", "pull_request:44341|pull_request:44388", "pull_request:43989|pull_request:44193", "pull_request:44341|pull_request:44429", "pull_request:43303|pull_request:43524", "pull_request:44118|pull_request:44192", "pull_request:43251|pull_request:43254", "pull_request:44341|pull_request:44400"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 18968, "estimated_input_tokens": 9356, "item_count": 17, "node_count": 17, "serialized_chars": 37423, "soft_pair_count": 10}, "cached_at": "2026-04-14T13:17:48Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "98298e4ab9251b8dce7717d12d43d4ea65d89490df8fb4bc5f82528f8457fc00", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43242", "pull_request:43254", "pull_request:43303", "pull_request:43524", "pull_request:43926", "pull_request:44080", "pull_request:44192", "pull_request:44194", "pull_request:44203", "pull_request:44334", "pull_request:44470", "pull_request:44564", "pull_request:44663", "pull_request:44690", "pull_request:45111", "pull_request:45131", "pull_request:45132"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "45132 is the strongest single PR anchor here because it directly fixes the core router softmax 
mistake with validation, while broader PRs in the cluster either cover different bugs or are less focused.", "canonical_issue_reason": null, "canonical_pr_reason": "45132 is the cleanest representative of the main MoE double-softmax bug: it is focused, names the affected models, and includes tests.", "confidence": 0.84, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:44334", "reason": "Same missing `tokenizer_class` assignment in the same file; these are the same underlying fix for the cookiecutter/model info bug.", "right": "pull_request:44663"}, {"accept": true, "left": "pull_request:44194", "reason": "Both address the same optimizer-args-not-being-used bug in `trainer_optimizer.py`; 44203 is a broader implementation of the same fix family.", "right": "pull_request:44203"}, {"accept": true, "left": "pull_request:44080", "reason": "Same `ModelOutput` attribute/key sync bug in `generic.py`; both patch the same underlying `__setattr__` behavior.", "right": "pull_request:44192"}, {"accept": true, "left": "pull_request:43242", "reason": "Both fix the same `fixed_cross_entropy` kwargs passthrough issue; one filters supported kwargs, the other adds them explicitly.", "right": "pull_request:43254"}, {"accept": true, "left": "pull_request:44564", "reason": "Same missing ReLU in GLM-MOE-DSA indexer scoring, applied to the same code path in the same two files.", "right": "pull_request:44690"}, {"accept": false, "left": "pull_request:43303", "reason": "Related DeepSpeed weight-conversion work, but 43926 looks like a later refinement/follow-up to the new path rather than the same concrete bug fix.", "right": "pull_request:43926"}, {"accept": true, "left": "pull_request:44470", "reason": "Same GLM-MOE-DSA ReLU omission in the scoring path; the diffs are effectively the same fix with minor wording/API differences.", "right": "pull_request:44564"}, {"accept": false, "left": "pull_request:43524", "reason": "Both are in DeepSpeed weight conversion, but they address 
different subproblems and do not look like the same concrete fix that could be merged as one PR.", "right": "pull_request:43926"}, {"accept": true, "left": "pull_request:45111", "reason": "Same MoE router double-softmax bug with overlapping affected models; 45132 is a narrower subset of 45111's fix.", "right": "pull_request:45132"}, {"accept": false, "left": "pull_request:45131", "reason": "Different bugs: 45131 fixes routers returning probabilities instead of logits, while 45132 fixes double softmax in the load-balancing loss.", "right": "pull_request:45132"}], "summary": "This cluster is mostly several independent duplicate PR pairs across different subsystems, not one single bug. I accepted the pairs that clearly fix the same concrete code path, and rejected the MoE output-vs-loss mismatch pair as a different issue."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44334|pull_request:44663", "pull_request:44194|pull_request:44203", "pull_request:44080|pull_request:44192", "pull_request:43242|pull_request:43254", "pull_request:44564|pull_request:44690", "pull_request:43303|pull_request:43926", "pull_request:44470|pull_request:44564", "pull_request:43524|pull_request:43926", "pull_request:45111|pull_request:45132", "pull_request:45131|pull_request:45132"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 20384, "estimated_input_tokens": 10064, "item_count": 17, "node_count": 17, "serialized_chars": 40253, "soft_pair_count": 15}, "cached_at": "2026-04-14T13:18:20Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "da299dea2e8f74c9a26ceb37a56d1479afb1c41868ef13c68246625b1d7e7444", 
"prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:42781", "pull_request:43400", "pull_request:43403", "pull_request:43453", "pull_request:43580", "pull_request:43998", "pull_request:44000", "pull_request:44001", "pull_request:44002", "pull_request:44003", "pull_request:44004", "pull_request:44028", "pull_request:44439", "pull_request:44848", "pull_request:44934", "pull_request:45048", "pull_request:45053"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 44848 is the strongest standalone fix PR in the set: it is merged, has the highest discussion/review activity, and addresses a concrete generation/device bug in code rather than just test updates.", "canonical_issue_reason": null, "canonical_pr_reason": "No single PR is a clear canonical duplicate target; the set spans unrelated fixes/features across different models and test-only changes.", "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43453", "reason": "Different fixes in different areas: config/pad_token_id plumbing for GLM/Qwen/VL models versus integration-test expectation updates for Qwen2/T5.", "right": "pull_request:44934"}, {"accept": false, "left": "pull_request:43998", "reason": "Both are output-tracing refactors, but they touch different models and code paths (timm_backbone vs mamba/falcon_mamba), so they are not the same change.", "right": "pull_request:44003"}, {"accept": false, "left": "pull_request:43453", "reason": "Unrelated: GLM/Qwen padding-token config fix versus a SmolLM3 test expectation/update for generation behavior.", "right": "pull_request:45048"}, {"accept": false, "left": "pull_request:44001", "reason": "Different model implementations and different changes; univnet output tracing is not the same bug as mamba/falcon_mamba output-capture refactoring.", "right": "pull_request:44003"}, {"accept": false, "left": "pull_request:44002", "reason": "UperNet forward-signature/output-tracing cleanup is a 
separate model-specific refactor, not the same underlying change as the mamba output-capturing work.", "right": "pull_request:44003"}, {"accept": false, "left": "pull_request:44000", "reason": "Vision-text dual encoder refactor is a different model and code path from mamba/falcon_mamba output tracing.", "right": "pull_request:44003"}, {"accept": false, "left": "pull_request:44003", "reason": "Both relate to output capture, but they are model-specific refactors in different architectures (mamba vs superpoint) and do not fix the same bug.", "right": "pull_request:44028"}, {"accept": false, "left": "pull_request:43403", "reason": "Same LightOn OCR test file, but different issues: one fixes image_sizes preparation, the other disables torch export for an incompatible vision encoder.", "right": "pull_request:43580"}, {"accept": false, "left": "pull_request:44848", "reason": "Different scopes and bugs: Qwen3 Omni MoE/meta-device generation fix versus unrelated Qwen2/T5 integration-test expectation changes.", "right": "pull_request:44934"}, {"accept": false, "left": "pull_request:44934", "reason": "Qwen2/T5 test adjustments are unrelated to the XCLIP processing_utils change; they do not share the same code path or failure.", "right": "pull_request:45053"}, {"accept": false, "left": "pull_request:44003", "reason": "Different model families and different tracing refactors: mamba/falcon_mamba versus codegen.", "right": "pull_request:44004"}, {"accept": false, "left": "pull_request:44000", "reason": "Vision-text dual encoder output refactor is unrelated to codegen output-tracing changes.", "right": "pull_request:44004"}, {"accept": false, "left": "pull_request:44439", "reason": "ProphetNet integration-test fix is unrelated to the Qwen2/T5 test updates.", "right": "pull_request:44934"}, {"accept": false, "left": "pull_request:42781", "reason": "Related VibeVoice family, but not the same change: 42781 adds the main realtime model stack while 43400 adds the acoustic tokenizer and 
its tests/docs.", "right": "pull_request:43400"}, {"accept": false, "left": "pull_request:43453", "reason": "GLM/Qwen pad_token_id config plumbing is unrelated to the ProphetNet integration-test fix.", "right": "pull_request:44439"}], "summary": "No true duplicate pairs here; the soft matches are mostly broad thematic overlaps (same model family, same refactor pattern, or the same test file) rather than the same underlying bug/change. The closest pair is the VibeVoice PRs, but they are still separate deliverables."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43453|pull_request:44934", "pull_request:43998|pull_request:44003", "pull_request:43453|pull_request:45048", "pull_request:44001|pull_request:44003", "pull_request:44002|pull_request:44003", "pull_request:44000|pull_request:44003", "pull_request:44003|pull_request:44028", "pull_request:43403|pull_request:43580", "pull_request:44848|pull_request:44934", "pull_request:44934|pull_request:45053", "pull_request:44003|pull_request:44004", "pull_request:44000|pull_request:44004", "pull_request:44439|pull_request:44934", "pull_request:42781|pull_request:43400", "pull_request:43453|pull_request:44439"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 22762, "estimated_input_tokens": 11253, "item_count": 18, "node_count": 18, "serialized_chars": 45011, "soft_pair_count": 11}, "cached_at": "2026-04-14T13:19:00Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "06f383ab6671fe552f017570e0e0c0f196fdcb13c9a0287ec4c931b2cf5b4fa5", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:40546", "pull_request:43030", 
"pull_request:43349", "pull_request:43400", "pull_request:43438", "pull_request:43453", "pull_request:43764", "pull_request:44030", "pull_request:44095", "pull_request:44330", "pull_request:44456", "pull_request:44515", "pull_request:44519", "pull_request:44675", "pull_request:44759", "pull_request:44828", "pull_request:44848", "pull_request:45336"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 44848 is the best overall PR to anchor this cluster because it is the most substantial and central code change among otherwise unrelated items, with clear runtime impact and high review activity.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 44848 is the strongest standalone representative here: it has the most discussion, includes a concrete generation/runtime fix, and is broader than the docs/test-only or narrowly scoped follow-ups. Even so, the overall cluster is not a true single-topic duplicate set.", "confidence": 0.81, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:44759", "reason": "Both are consecutive chunks of the same codebase-wide change removing `cache_position` from model implementations, so they look like the same underlying refactor split across PRs.", "right": "pull_request:44828"}, {"accept": false, "left": "pull_request:43453", "reason": "These address different problems: adding `pad_token_id` plumbing for failing tests vs. a Qwen3 Omni MoE fix in generation/model code.", "right": "pull_request:44848"}, {"accept": false, "left": "pull_request:43349", "reason": "One removes device synchronizations in FP8 matmul; the other changes expert backend behavior on CPU. 
Different code paths and different fixes.", "right": "pull_request:43438"}, {"accept": false, "left": "pull_request:44519", "reason": "Marian integration test adjustments are unrelated to the Qwen3 Omni MoE generation/meta-device fix.", "right": "pull_request:44848"}, {"accept": false, "left": "pull_request:40546", "reason": "These are separate VibeVoice artifacts: the main model implementation vs. the acoustic tokenizer. Related family, but not the same change.", "right": "pull_request:43400"}, {"accept": false, "left": "pull_request:44456", "reason": "A DepthPro integration-test fix is unrelated to the Qwen3 Omni MoE runtime/generation issue.", "right": "pull_request:44848"}, {"accept": false, "left": "pull_request:44515", "reason": "GPTNeo test-only generation adjustment is unrelated to the Qwen3 Omni MoE fix.", "right": "pull_request:44848"}, {"accept": false, "left": "pull_request:44095", "reason": "These are different fixes: loading/initialization logic vs. Gemma4 shared-weight removal and load-time handling.", "right": "pull_request:45336"}, {"accept": false, "left": "pull_request:43030", "reason": "Both concern ModernBERT, but one is an attention-interface refactor and the other changes the default attention implementation; they are not the same concrete bug/change.", "right": "pull_request:43764"}, {"accept": true, "left": "pull_request:44330", "reason": "Both PRs continue the same `cache_position` removal refactor across model implementations and could plausibly have been merged as one broader PR.", "right": "pull_request:44759"}, {"accept": false, "left": "pull_request:44030", "reason": "DPR output-tracing refactor and continuous-batching docs updates are unrelated.", "right": "pull_request:44675"}], "summary": "This cluster is mostly heterogeneous. The only clear duplicate-like pairs are the successive \"Remove cache_position in more models\" follow-up PRs, which are the same refactor split into chunks. 
The other soft-similarity pairs are only thematically related or clearly different changes, so they should stay separate."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44759|pull_request:44828", "pull_request:43453|pull_request:44848", "pull_request:43349|pull_request:43438", "pull_request:44519|pull_request:44848", "pull_request:40546|pull_request:43400", "pull_request:44456|pull_request:44848", "pull_request:44515|pull_request:44848", "pull_request:44095|pull_request:45336", "pull_request:43030|pull_request:43764", "pull_request:44330|pull_request:44759", "pull_request:44030|pull_request:44675"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 22928, "estimated_input_tokens": 11336, "item_count": 18, "node_count": 18, "serialized_chars": 45342, "soft_pair_count": 11}, "cached_at": "2026-04-14T13:19:34Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "e33194c59564d11f17731338a0eefa6766fade83444e6d7842d3a03b2a512545", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:40546", "pull_request:42781", "pull_request:43030", "pull_request:43426", "pull_request:43453", "pull_request:43672", "pull_request:44000", "pull_request:44002", "pull_request:44029", "pull_request:44414", "pull_request:44527", "pull_request:44595", "pull_request:44662", "pull_request:44675", "pull_request:45212", "pull_request:45214", "pull_request:45340", "pull_request:45401"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "No PR is a good global representative for duplicate triage because the set is heterogeneous; the shared files do not indicate one underlying 
change.", "canonical_issue_reason": null, "canonical_pr_reason": "No single canonical PR stands out: the items span different models and unrelated utility/refactor changes, with only superficial overlap in shared scaffolding files.", "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44662", "reason": "Different model onboarding PRs (PenguinVL vs Voxtral TTS); shared auto/docs files are just registry scaffolding, not the same change.", "right": "pull_request:45401"}, {"accept": false, "left": "pull_request:40546", "reason": "Both are VibeVoice-related, but one is the base implementation and the other adds a realtime variant with additional tokenizer/feature extractor/model changes; not the same concrete fix.", "right": "pull_request:42781"}, {"accept": false, "left": "pull_request:43030", "reason": "ModernBERT attention-interface refactor vs PE Audio logits device fix; only a shared test file appears incidental.", "right": "pull_request:43672"}, {"accept": false, "left": "pull_request:44000", "reason": "Vision-text dual encoder output-tracing refactor and a docs-only config update are unrelated.", "right": "pull_request:44675"}, {"accept": false, "left": "pull_request:44002", "reason": "UperNet output-tracing refactor vs docs/config cleanup; no shared bug or code path.", "right": "pull_request:44675"}, {"accept": false, "left": "pull_request:44414", "reason": "Model-loading verbosity change and conversion-mapping fixes affect different subsystems and problems.", "right": "pull_request:45340"}, {"accept": false, "left": "pull_request:44029", "reason": "RWKV output-tracing refactor is unrelated to the docs/config PR.", "right": "pull_request:44675"}, {"accept": false, "left": "pull_request:43453", "reason": "Pad token config fixes across several models and a Musicgen test workaround are different issues.", "right": "pull_request:44527"}, {"accept": false, "left": "pull_request:45212", "reason": "Both are device/test-related, but they 
fix different model-specific paths (MusicFlamingo XPU tests vs Cohere ASR beam-search position embeddings).", "right": "pull_request:45214"}, {"accept": false, "left": "pull_request:43426", "reason": "Tokenizer cleanup restoration and conversion-mapping fixes are unrelated changes.", "right": "pull_request:45340"}, {"accept": false, "left": "pull_request:44595", "reason": "Both add new model docs/registries, but for different models with no shared underlying bug or change.", "right": "pull_request:45401"}], "summary": "The cluster is a grab bag of unrelated PRs: model additions, docs/registry updates, and a few small refactors or test fixes. The only overlaps are broad scaffolding files like auto registries or shared test utilities, not the same concrete bug/change. None of the proposed soft edges look like true duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44662|pull_request:45401", "pull_request:40546|pull_request:42781", "pull_request:43030|pull_request:43672", "pull_request:44000|pull_request:44675", "pull_request:44002|pull_request:44675", "pull_request:44414|pull_request:45340", "pull_request:44029|pull_request:44675", "pull_request:43453|pull_request:44527", "pull_request:45212|pull_request:45214", "pull_request:43426|pull_request:45340", "pull_request:44595|pull_request:45401"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 20858, "estimated_input_tokens": 10301, "item_count": 18, "node_count": 18, "serialized_chars": 41201, "soft_pair_count": 11}, "cached_at": "2026-04-14T13:19:52Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": 
"2e1c14fb1d06859dc3890d5e322b014fefc199049794a5ae8a32735cd65c3fe0", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43339", "pull_request:43426", "pull_request:43453", "pull_request:43464", "pull_request:43592", "pull_request:43615", "pull_request:44001", "pull_request:44095", "pull_request:44229", "pull_request:44414", "pull_request:44497", "pull_request:44662", "pull_request:44675", "pull_request:44801", "pull_request:44934", "pull_request:45044", "pull_request:45075", "pull_request:45340"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43453", "reason": "Different fixes: one addresses missing `pad_token_id` on several configs/models, the other adjusts MarkupLM integration test dtype. Same area of tests, but not the same underlying bug.", "right": "pull_request:43464"}, {"accept": false, "left": "pull_request:44095", "reason": "Distinct model-loading behaviors: remote-code weight initialization vs tie-weight handling when both weights already exist. Related file, but separate code paths and merge intents.", "right": "pull_request:44497"}, {"accept": false, "left": "pull_request:44001", "reason": "UnivNet output-tracing refactor is unrelated to continuous batching doc updates. No shared bug or implementation change.", "right": "pull_request:44675"}, {"accept": false, "left": "pull_request:44662", "reason": "Both are model implementation PRs, but for different models entirely (PenguinVL vs DeepSeek-OCR-2). Not duplicates and not mergeable into one change.", "right": "pull_request:45075"}, {"accept": false, "left": "pull_request:43426", "reason": "Tokenization cleanup restoration and model-loading tqdm verbosity are unrelated. 
Shared repo area only, not the same issue.", "right": "pull_request:44414"}, {"accept": false, "left": "pull_request:45044", "reason": "Janus generation bug fix is separate from conversion-mapping updates for VLM checkpoints. Different runtime behavior and different target code.", "right": "pull_request:45340"}, {"accept": false, "left": "pull_request:43339", "reason": "Both add test expectations, but for different models and different behaviors (LW-DETR XPU vs MixedInt8 GPT2 outputs). Not the same underlying failure.", "right": "pull_request:43615"}, {"accept": false, "left": "pull_request:44229", "reason": "Docstring backtick fix in AnyToAnyPipeline is unrelated to continuous batching config docs. No common bug/change.", "right": "pull_request:44675"}, {"accept": false, "left": "pull_request:43592", "reason": "Configuration default-value cleanup across multiple models is unrelated to conversion-mapping fixes for VLMs. Different scope and effect.", "right": "pull_request:45340"}, {"accept": false, "left": "pull_request:44801", "reason": "Tokenizer auto-class mapping corrections and conversion mapping fixes are separate registry/dispatch issues. They do not fix the same concrete problem.", "right": "pull_request:45340"}, {"accept": false, "left": "pull_request:43464", "reason": "Both are integration-test fixes, but for different models and different failure modes. No shared underlying bug.", "right": "pull_request:44934"}], "summary": "The cluster is a collection of unrelated PRs from different subsystems (model additions, docs-only fixes, test expectation updates, and separate core bug fixes). 
The soft pairs are all false positives: they share only broad themes like tests, docs, or core utilities, not the same concrete bug/change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43453|pull_request:43464", "pull_request:44095|pull_request:44497", "pull_request:44001|pull_request:44675", "pull_request:44662|pull_request:45075", "pull_request:43426|pull_request:44414", "pull_request:45044|pull_request:45340", "pull_request:43339|pull_request:43615", "pull_request:44229|pull_request:44675", "pull_request:43592|pull_request:45340", "pull_request:44801|pull_request:45340", "pull_request:43464|pull_request:44934"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 19210, "estimated_input_tokens": 9477, "item_count": 18, "node_count": 18, "serialized_chars": 37908, "soft_pair_count": 11}, "cached_at": "2026-04-14T13:20:22Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "64bc50b44797c8d40acba9de1f10b5ac0a5803d2db2d080c067eba1d846f49df", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43030", "pull_request:43324", "pull_request:43426", "pull_request:43579", "pull_request:43614", "pull_request:43615", "pull_request:43821", "pull_request:43861", "pull_request:43938", "pull_request:43956", "pull_request:44037", "pull_request:44321", "pull_request:44353", "pull_request:44428", "pull_request:44482", "pull_request:44536", "pull_request:44801", "pull_request:45214"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "No global best PR is suitable because this is not one underlying bug/change set; 43821 is only the best representative for the 
PEFT typo-fix pair, not for the entire cluster.", "canonical_issue_reason": null, "canonical_pr_reason": "No single canonical PR for the full cluster: the items span unrelated changes in different models, tests, and utilities. Only one soft edge is a true duplicate pair.", "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43956", "reason": "Same target model area, but different concrete fixes: one rewrites qwen3_vl_moe weight mapping, the other adds transpose dim checks/alignment handling. Not the same underlying change.", "right": "pull_request:44037"}, {"accept": false, "left": "pull_request:43324", "reason": "Different models and different test adjustments; both are XPU-related but do not fix the same bug or code path.", "right": "pull_request:44321"}, {"accept": false, "left": "pull_request:43614", "reason": "Unrelated fixes in different models: diffllama contiguous-input handling vs olmo_hybrid XPU expectations.", "right": "pull_request:44353"}, {"accept": false, "left": "pull_request:43030", "reason": "ModernBERT attention refactor is unrelated to tokenizer-class hub metadata fixes; same broad release does not imply duplicate change.", "right": "pull_request:44801"}, {"accept": false, "left": "pull_request:43579", "reason": "One adds XPU test expectations for solar_open, the other broadens a CPU-only skip guard for tensor-parallel tests; different issues and code paths.", "right": "pull_request:44536"}, {"accept": false, "left": "pull_request:43426", "reason": "Tokenization backend cleanup behavior is unrelated to hub tokenizer-class registration for specific models.", "right": "pull_request:44801"}, {"accept": true, "left": "pull_request:43821", "reason": "These are the same PEFT typo fix in the same file/lines, correcting quantizatin_operations to quantization_operations; they could be merged as one PR.", "right": "pull_request:43861"}, {"accept": false, "left": "pull_request:43615", "reason": "Different models and different 
problems: mixed-int8 GPT2 expected output updates vs cohere_asr positional-embedding device placement.", "right": "pull_request:45214"}, {"accept": false, "left": "pull_request:43614", "reason": "DiffLlama xpu contiguous-attention fix and Higgs audio XPU expectation updates are unrelated changes.", "right": "pull_request:44482"}, {"accept": false, "left": "pull_request:43615", "reason": "Both are XPU test expectation updates, but for different models and failure modes; not the same underlying bug.", "right": "pull_request:43938"}, {"accept": false, "left": "pull_request:43614", "reason": "DiffLlama compile-mode contiguity fix is unrelated to VibeVoice tokenizer fixture expectation updates.", "right": "pull_request:44428"}], "summary": "Mostly heterogeneous PRs across unrelated models/tests/docs; no single cluster-wide duplicate. The only clear duplicate soft edge is the repeated PEFT typo fix (43821/43861)."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43956|pull_request:44037", "pull_request:43324|pull_request:44321", "pull_request:43614|pull_request:44353", "pull_request:43030|pull_request:44801", "pull_request:43579|pull_request:44536", "pull_request:43426|pull_request:44801", "pull_request:43821|pull_request:43861", "pull_request:43615|pull_request:45214", "pull_request:43614|pull_request:44482", "pull_request:43615|pull_request:43938", "pull_request:43614|pull_request:44428"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 18830, "estimated_input_tokens": 9287, "item_count": 18, "node_count": 18, "serialized_chars": 37148, "soft_pair_count": 10}, "cached_at": "2026-04-14T13:20:44Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": 
"gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "7343bb5d55dd76a01da3158b62fca1d14efa1bfaa035c1f0e7340f39aa053439", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:42802", "pull_request:43339", "pull_request:43426", "pull_request:43464", "pull_request:43614", "pull_request:43896", "pull_request:44236", "pull_request:44250", "pull_request:44285", "pull_request:44320", "pull_request:44456", "pull_request:44490", "pull_request:44502", "pull_request:44515", "pull_request:44733", "pull_request:45049", "pull_request:45212", "pull_request:45284"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 44285 is the best overall representative because it is the largest and most complete change, spans the full feature surface, and has the highest review/activity signal; the others are narrower fixes or isolated test updates.", "canonical_issue_reason": null, "canonical_pr_reason": "No issue is present, so the best canonical PR is the most substantial standalone change: PR 44285, which introduces VidEoMT across docs, auto classes, modeling, processors, and tests, making it the strongest representative artifact in this set.", "confidence": 0.18, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43614", "reason": "Both mention XPU-related attention/kernel behavior, but one is a DiffLlama contiguity fix and the other is a Jamba/Qwen2 flash-attention kernel mapping change. 
Different models and different concrete code paths.", "right": "pull_request:44733"}, {"accept": false, "left": "pull_request:43426", "reason": "Tokenization cleanup behavior and TrainingArguments `report_to=\"all\"` regression are unrelated features in different modules.", "right": "pull_request:44250"}, {"accept": false, "left": "pull_request:43896", "reason": "DAC expected-output test updates and an `is_torch_bf16_gpu_available` type-checker guard are unrelated changes with no shared bug.", "right": "pull_request:44502"}, {"accept": false, "left": "pull_request:43896", "reason": "Both adjust test expectations, but for different models and different backend/CI contexts; they do not look like the same underlying bug.", "right": "pull_request:45284"}, {"accept": false, "left": "pull_request:43339", "reason": "Both are XPU test-support changes, but for different model test suites and different fixtures/expectations. Too broad to be the same underlying change.", "right": "pull_request:45212"}, {"accept": false, "left": "pull_request:44285", "reason": "Both are large model-addition PRs, but they add different models (VidEoMT vs SAM3-LiteText) with distinct implementations and docs.", "right": "pull_request:44320"}, {"accept": false, "left": "pull_request:44236", "reason": "Zero3 init/dependency setup and RoPE kwargs handling are unrelated fixes in different parts of the stack.", "right": "pull_request:45049"}, {"accept": false, "left": "pull_request:43464", "reason": "Both fix failing integration tests, but for different models and different failure modes (MarkupLM dtype loading vs DepthPro dtype loading).", "right": "pull_request:44456"}, {"accept": false, "left": "pull_request:43464", "reason": "Different model tests and different execution issues; sharing the same 'failing integration test' pattern is not enough to merge them.", "right": "pull_request:44515"}, {"accept": false, "left": "pull_request:42802", "reason": "Lasr flex-attn support flagging and EuroBERT 
model-parallel mask-device handling are separate model-specific fixes with no common code path.", "right": "pull_request:44490"}], "summary": "These PRs are mostly unrelated one-off fixes, test expectation updates, or new model additions across different subsystems; there isn\u2019t a strong duplicate cluster here. The only mild commonality is that several are backend/device-specific test adjustments, but they affect different models and code paths."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43614|pull_request:44733", "pull_request:43426|pull_request:44250", "pull_request:43896|pull_request:44502", "pull_request:43896|pull_request:45284", "pull_request:43339|pull_request:45212", "pull_request:44285|pull_request:44320", "pull_request:44236|pull_request:45049", "pull_request:43464|pull_request:44456", "pull_request:43464|pull_request:44515", "pull_request:42802|pull_request:44490"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 18476, "estimated_input_tokens": 9110, "item_count": 17, "node_count": 17, "serialized_chars": 36440, "soft_pair_count": 10}, "cached_at": "2026-04-14T13:21:05Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "a8a6a384917b2c70b5b2565b32335186f6cded3b97ea228f5960493146590401", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:42230", "pull_request:43578", "pull_request:43614", "pull_request:43635", "pull_request:43896", "pull_request:44001", "pull_request:44002", "pull_request:44025", "pull_request:44026", "pull_request:44033", "pull_request:44229", "pull_request:44235", "pull_request:44321", "pull_request:44414", 
"pull_request:44801", "pull_request:44833", "pull_request:45214"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 45214 is the best representative PR because it is a clear end-user bug fix with a precise code-path change and validation, unlike the more refactor- or maintenance-oriented PRs.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 45214 is the strongest standalone fix: it addresses a concrete runtime/device-placement bug in cohere_asr, touches the actual model code in two implementations, and includes test coverage.", "confidence": 0.95, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44321", "reason": "Both are test-related, but 44321 skips invalid VoxtralRealtime tests while 45214 fixes a device mismatch in cohere_asr forward; different models and different problems.", "right": "pull_request:45214"}, {"accept": false, "left": "pull_request:44235", "reason": "Both edit tokenization_auto, but 44235 adds fuyu registration only, while 44801 adds deepseek_v2/v3 and modernbert plus backend tests; not the same change.", "right": "pull_request:44801"}, {"accept": false, "left": "pull_request:44001", "reason": "44001 refactors UnivNet output tracing, while 44229 only fixes a missing backtick in a pipeline docstring; unrelated changes.", "right": "pull_request:44229"}, {"accept": false, "left": "pull_request:44002", "reason": "UperNet output-tracing refactor vs. 
docstring punctuation fix in AnyToAnyPipeline; no shared concrete bug.", "right": "pull_request:44229"}, {"accept": false, "left": "pull_request:44025", "reason": "Depth Anything tracing refactor and a docstring backtick fix are different kinds of changes, not one underlying bug.", "right": "pull_request:44229"}, {"accept": false, "left": "pull_request:44026", "reason": "VisionEncoderDecoder tracing refactor and a docstring typo fix do not address the same code-path problem.", "right": "pull_request:44229"}, {"accept": false, "left": "pull_request:42230", "reason": "Both mention xpu/CUDA SDPA behavior, but one changes attention mask unmasking and the other adds contiguity for DiffLlama tensors; different concrete fixes.", "right": "pull_request:43614"}, {"accept": false, "left": "pull_request:43896", "reason": "43896 updates DAC expected outputs due to CI settings, while 44833 re-runs the modular converter across examples; distinct maintenance tasks.", "right": "pull_request:44833"}, {"accept": false, "left": "pull_request:43635", "reason": "Both touch PR comment CI, but 43635 threads explicit PR info into bad-commit analysis while 44033 improves notification formatting/analysis flow; not the same bug or mergeable PR.", "right": "pull_request:44033"}, {"accept": false, "left": "pull_request:43578", "reason": "Youtu test repo alignment and model-loading tqdm verbosity reduction are separate maintenance changes with different goals and code paths.", "right": "pull_request:44414"}], "summary": "No soft-edge pair looks like a true duplicate: most share a broad subsystem, but they fix different concrete bugs or make unrelated maintenance/test changes. 
I would keep them separate."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44321|pull_request:45214", "pull_request:44235|pull_request:44801", "pull_request:44001|pull_request:44229", "pull_request:44002|pull_request:44229", "pull_request:44025|pull_request:44229", "pull_request:44026|pull_request:44229", "pull_request:42230|pull_request:43614", "pull_request:43896|pull_request:44833", "pull_request:43635|pull_request:44033", "pull_request:43578|pull_request:44414"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 18108, "estimated_input_tokens": 8926, "item_count": 17, "node_count": 17, "serialized_chars": 35701, "soft_pair_count": 10}, "cached_at": "2026-04-14T13:21:38Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "420e4dbf99bd6e5d171c6ea7f4f2984177f1a58ffdd473206a1da67bac27ddc6", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:41912", "pull_request:42668", "pull_request:43426", "pull_request:43500", "pull_request:43553", "pull_request:43554", "pull_request:43565", "pull_request:43578", "pull_request:43580", "pull_request:43615", "pull_request:43953", "pull_request:43956", "pull_request:44414", "pull_request:44482", "pull_request:45061", "pull_request:45075", "pull_request:45401"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 43426 is the strongest standalone PR here: merged, concrete behavior fix in tokenizers backend, and accompanied by tests.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 43956 is the narrower, test-backed fix for the same qwen3_vl_moe weight-mapping path and is the better canonical 
representative of that duplicate pair.", "confidence": 0.88, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:45075", "reason": "Both are new-model add-ons, but they introduce different models and code paths; shared auto/docs files are just framework scaffolding, not the same change.", "right": "pull_request:45401"}, {"accept": false, "left": "pull_request:43426", "reason": "Completely different concerns: tokenizers cleanup behavior vs. updating Youtu test repos.", "right": "pull_request:43578"}, {"accept": false, "left": "pull_request:43500", "reason": "Both mention bot/workflow permissions, but one is a test-file permission hack and the other changes workflow permissions; not the same underlying fix.", "right": "pull_request:43565"}, {"accept": false, "left": "pull_request:43500", "reason": "The PRs touch different artifacts and purposes: a test-file change versus adding a style-bot workflow.", "right": "pull_request:43553"}, {"accept": true, "left": "pull_request:43953", "reason": "Both fix the same qwen3_vl_moe conversion-mapping code path in conversion_mapping.py; the second PR refines the same weight-conversion logic and could plausibly be merged as one fix.", "right": "pull_request:43956"}, {"accept": false, "left": "pull_request:41912", "reason": "Unrelated issues: T5 hidden-state dtype restoration versus disabling torch export tests for a different model.", "right": "pull_request:43580"}, {"accept": false, "left": "pull_request:43500", "reason": "These are separate workflow changes with different intended effects; not a single concrete bug fix.", "right": "pull_request:43554"}, {"accept": false, "left": "pull_request:42668", "reason": "Different subsystems and problems: processor auto-loading robustness vs. 
model-loading verbosity.", "right": "pull_request:44414"}, {"accept": false, "left": "pull_request:44414", "reason": "Model-loading tqdm verbosity and flash-attn deprecation support are unrelated changes.", "right": "pull_request:45061"}, {"accept": false, "left": "pull_request:43615", "reason": "Both add XPU test expectations, but for different models and different failures; they do not fix the same underlying bug.", "right": "pull_request:44482"}], "summary": "Mostly unrelated pull requests; the only clear duplicate-like pair is the Qwen3VL-MoE conversion-mapping fixes."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:45075|pull_request:45401", "pull_request:43426|pull_request:43578", "pull_request:43500|pull_request:43565", "pull_request:43500|pull_request:43553", "pull_request:43953|pull_request:43956", "pull_request:41912|pull_request:43580", "pull_request:43500|pull_request:43554", "pull_request:42668|pull_request:44414", "pull_request:44414|pull_request:45061", "pull_request:43615|pull_request:44482"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 20666, "estimated_input_tokens": 10205, "item_count": 17, "node_count": 17, "serialized_chars": 40819, "soft_pair_count": 10}, "cached_at": "2026-04-14T13:22:25Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "7fddbf34434ee47e8f42f4cb5aa1404a02d89826503218adfb12f68c1f899a5e", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43071", "pull_request:43403", "pull_request:43438", "pull_request:44251", "pull_request:44285", "pull_request:44300", "pull_request:44412", "pull_request:44456", "pull_request:44490", 
"pull_request:44497", "pull_request:44515", "pull_request:44519", "pull_request:44566", "pull_request:44602", "pull_request:44675", "pull_request:44828", "pull_request:45336"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "44602 is the strongest representative of the only real duplicate pair in the set and reflects the shared code-path cleanup most clearly.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 44602 is the best anchor because it covers the broader earlier pass of the `cache_position` removal cleanup; 44828 is a continuation of the same change series.", "confidence": 0.98, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44251", "reason": "Different model additions (`Jina-Embeddings-V3` vs `VidEoMT`) with different files and implementations; not the same change.", "right": "pull_request:44285"}, {"accept": false, "left": "pull_request:43403", "reason": "Unrelated fixes: a LightOn OCR test helper tweak versus an EuroBERT model-parallelism bug fix.", "right": "pull_request:44490"}, {"accept": false, "left": "pull_request:44412", "reason": "Both are typing chores, but they target different subsystems (`quantizers` vs `cli`) and different problems.", "right": "pull_request:44566"}, {"accept": false, "left": "pull_request:43071", "reason": "Both touch conversion logic, but one is a Mixtral renaming refactor and the other is a recursive weight-conversion behavior change; not the same bug.", "right": "pull_request:44300"}, {"accept": false, "left": "pull_request:44497", "reason": "Both involve weight tying/loading, but 45336 is Gemma4-specific shared-weight handling while 44497 changes generic tie-weights behavior.", "right": "pull_request:45336"}, {"accept": false, "left": "pull_request:43438", "reason": "Completely different scope: CPU `batched_mm` docs/perf work versus a DepthPro integration test fix.", "right": "pull_request:44456"}, {"accept": false, "left": "pull_request:43438", "reason": "Unrelated: 
`batched_mm` CPU performance/docs versus a GPTNeo generation test expectation fix.", "right": "pull_request:44515"}, {"accept": false, "left": "pull_request:43438", "reason": "One is `batched_mm` CPU work; the other is documentation for continuous batching config.", "right": "pull_request:44675"}, {"accept": false, "left": "pull_request:43438", "reason": "Different artifacts and bugs: `batched_mm` CPU behavior versus Marian integration test expectations.", "right": "pull_request:44519"}, {"accept": true, "left": "pull_request:44602", "reason": "Same underlying `cache_position` removal cleanup across model forward/cache code paths; 44828 reads as the continuation/final pass of the same change.", "right": "pull_request:44828"}], "summary": "Mostly a heterogeneous bundle of unrelated PRs (model additions, docs, typing, test fixes, and separate bug fixes). The only clear duplicate relationship is the two-stage `cache_position` removal series in more models."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44251|pull_request:44285", "pull_request:43403|pull_request:44490", "pull_request:44412|pull_request:44566", "pull_request:43071|pull_request:44300", "pull_request:44497|pull_request:45336", "pull_request:43438|pull_request:44456", "pull_request:43438|pull_request:44515", "pull_request:43438|pull_request:44675", "pull_request:43438|pull_request:44519", "pull_request:44602|pull_request:44828"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 20692, "estimated_input_tokens": 10218, "item_count": 18, "node_count": 18, "serialized_chars": 40870, "soft_pair_count": 10}, "cached_at": "2026-04-14T13:23:04Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": 
"gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "997c834a0bd39d2b8994bd7f39d2fca8bb856a2fa190700350a79be70b01ae04", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:42230", "pull_request:43445", "pull_request:43710", "pull_request:43896", "pull_request:43938", "pull_request:44269", "pull_request:44321", "pull_request:44428", "pull_request:44482", "pull_request:44490", "pull_request:44536", "pull_request:44595", "pull_request:44662", "pull_request:44733", "pull_request:44827", "pull_request:45044", "pull_request:45204", "pull_request:45315"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "45315 is the best representative of the small MoE-router subset: it fixes a concrete routing bug across multiple MoE models and is more substantive than the nearby MoE-related mapping change in 43445.", "canonical_issue_reason": null, "canonical_pr_reason": "If one PR must represent the cluster, 45315 is the closest fit because it has the clearest explicit issue target and the broadest concrete code change, but the cluster is not a true duplicate set.", "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:42230", "reason": "Both are XPU-adjacent, but one changes attention-mask handling in model code and the other only updates VibeVoice test fixtures/expectations.", "right": "pull_request:44428"}, {"accept": false, "left": "pull_request:42230", "reason": "They mention XPU, but they fix different concrete problems in different code paths: attention-mask unmasking vs flash-attention kernel fallback plus tests.", "right": "pull_request:44733"}, {"accept": false, "left": "pull_request:43938", "reason": "Different models and failure modes; one updates Exaone-MoE expectations, the other skips invalid VoxtralRealtime tests.", "right": "pull_request:44321"}, {"accept": false, "left": "pull_request:43896", "reason": "Both are test-related, but they target unrelated models and unrelated regressions 
(DAC outputs vs Mistral4 tests).", "right": "pull_request:44827"}, {"accept": false, "left": "pull_request:44595", "reason": "Both add model support, but CHMv2 and PenguinVL are distinct model ports with separate implementations and docs.", "right": "pull_request:44662"}, {"accept": false, "left": "pull_request:43710", "reason": "Docs-only changes, but they cover unrelated documentation topics and are not the same change.", "right": "pull_request:44269"}, {"accept": false, "left": "pull_request:44321", "reason": "One is a test skip adjustment, the other is a EuroBERT model-parallelism fix; different underlying issues.", "right": "pull_request:44490"}, {"accept": false, "left": "pull_request:43445", "reason": "Both involve MoE routing, but 43445 is conversion/renaming mapping while 45315 fixes router softmax/top-k logic; not the same concrete bug.", "right": "pull_request:45315"}, {"accept": false, "left": "pull_request:45044", "reason": "Both are model-specific generation/device fixes, but Janus image generation and VideoMT device mismatch are separate code paths.", "right": "pull_request:45204"}, {"accept": false, "left": "pull_request:44482", "reason": "Both touch XPU/TP-related tests, but one rewrites Higgs Audio expectations while the other only expands a CPU-only skip guard.", "right": "pull_request:44536"}], "summary": "This cluster is a grab-bag of mostly unrelated PRs spanning attention-mask fixes, MoE router changes, model-specific bug fixes, docs, test updates, and new model additions. 
I do not see any soft-edge pair that looks like the same underlying bug/change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:42230|pull_request:44428", "pull_request:42230|pull_request:44733", "pull_request:43938|pull_request:44321", "pull_request:43896|pull_request:44827", "pull_request:44595|pull_request:44662", "pull_request:43710|pull_request:44269", "pull_request:44321|pull_request:44490", "pull_request:43445|pull_request:45315", "pull_request:45044|pull_request:45204", "pull_request:44482|pull_request:44536"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 20594, "estimated_input_tokens": 10169, "item_count": 18, "node_count": 18, "serialized_chars": 40675, "soft_pair_count": 10}, "cached_at": "2026-04-14T13:23:29Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "7224c6fc4ab2f48dcab42ce7646eec06f965ab420849b1c5a0149d0d8ed411d1", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:42668", "pull_request:43324", "pull_request:43592", "pull_request:43907", "pull_request:43919", "pull_request:43936", "pull_request:44040", "pull_request:44051", "pull_request:44428", "pull_request:44536", "pull_request:44602", "pull_request:44733", "pull_request:45044", "pull_request:45049", "pull_request:45209", "pull_request:45212", "pull_request:45214", "pull_request:45336"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 44602 is the strongest standalone change to use as the cluster\u2019s representative because it makes a concrete multi-model runtime fix in core attention handling, with clear implementation impact and broader relevance than 
the mostly test-only PRs.", "canonical_issue_reason": null, "canonical_pr_reason": "No PR here is a true duplicate of the others. If one must anchor the cluster, PR 44602 is the broadest substantive runtime fix and the most representative of an actual code-path change, but it is still unrelated to most other items.", "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:45209", "reason": "Both are device-related, but one only relaxes Nomic BERT test expectations while the other fixes a Cohere ASR model_parallel_beam_search path by moving positional embeddings to device. Different bugs, different code paths.", "right": "pull_request:45214"}, {"accept": false, "left": "pull_request:43324", "reason": "Both touch XPU test support, but one adjusts Minimax M2 expected logits and the other adds/removes fixture support for MusicFlamingo. These are separate test maintenance changes, not the same bug.", "right": "pull_request:45212"}, {"accept": false, "left": "pull_request:43919", "reason": "Trainer gradient synchronization and RoPE kwargs handling are unrelated runtime issues. They affect different subsystems and could not plausibly be one PR.", "right": "pull_request:45049"}, {"accept": false, "left": "pull_request:44040", "reason": "Same model family, but different changes: one fixes Gemma3n get_audio_features behavior, the other corrects a forward type hint. Not the same underlying bug or change.", "right": "pull_request:44051"}, {"accept": false, "left": "pull_request:44602", "reason": "Both are model-internal code changes, but they fix different problems in different models: cache_position removal across several models versus Gemma4 shared-weight handling. No common concrete bug.", "right": "pull_request:45336"}, {"accept": false, "left": "pull_request:44428", "reason": "One updates VibeVoice XPU expectations; the other changes tensor-parallel test skip logic for GPU/XPU. 
These are separate test infra adjustments.", "right": "pull_request:44536"}, {"accept": false, "left": "pull_request:44536", "reason": "Tensor-parallel CPU-only skip logic is unrelated to the flash-attention kernel fallback plus XPU expectations for Qwen2/Jamba. Shared XPU mention is not enough to duplicate them.", "right": "pull_request:44733"}, {"accept": false, "left": "pull_request:43907", "reason": "GLM image test expectation updates are unrelated to Janus image-generation bugfixes in generation config handling. Different models and different failure modes.", "right": "pull_request:45044"}, {"accept": false, "left": "pull_request:42668", "reason": "AudioFlamingo/auto processor robustness and configuration default-value fixes are both infra-level, but they address different code paths and different failure classes.", "right": "pull_request:43592"}, {"accept": false, "left": "pull_request:43324", "reason": "Minimax M2 XPU test expectations and Moonshine Streaming device-placement fixes are unrelated. One is test data, the other is a runtime tensor device bug.", "right": "pull_request:43936"}], "summary": "This cluster is a mix of unrelated PRs: test expectation updates, device-specific test skips, configuration cleanups, and distinct model/runtime bug fixes. The soft matches are mostly driven by broad vocabulary overlap (e.g. 
XPU, model tests, or shared model families) rather than the same underlying defect, so none should be merged as duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:45209|pull_request:45214", "pull_request:43324|pull_request:45212", "pull_request:43919|pull_request:45049", "pull_request:44040|pull_request:44051", "pull_request:44602|pull_request:45336", "pull_request:44428|pull_request:44536", "pull_request:44536|pull_request:44733", "pull_request:43907|pull_request:45044", "pull_request:42668|pull_request:43592", "pull_request:43324|pull_request:43936"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 20964, "estimated_input_tokens": 10354, "item_count": 18, "node_count": 18, "serialized_chars": 41413, "soft_pair_count": 11}, "cached_at": "2026-04-14T13:23:54Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "9bbae807a83956b9e0f4d7071df52ec35cb4468afcb7d12a27611762aa847ac5", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:41763", "pull_request:43339", "pull_request:43488", "pull_request:43565", "pull_request:43936", "pull_request:43999", "pull_request:44001", "pull_request:44002", "pull_request:44025", "pull_request:44026", "pull_request:44027", "pull_request:44051", "pull_request:44325", "pull_request:44330", "pull_request:44412", "pull_request:44801", "pull_request:45336", "pull_request:45425"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "No single PR is a good global representative: the set spans different models, different subsystems, and different change intents, so picking one would be misleading.", 
"canonical_issue_reason": null, "canonical_pr_reason": "No canonical PR stands out because the cluster does not represent one underlying bug or feature; the PRs cover unrelated code paths and maintenance tasks.", "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43488", "reason": "Both touch repository/bot infrastructure, but they are different workflow changes with different purposes; not the same underlying fix and not mergeable as one PR.", "right": "pull_request:43565"}, {"accept": false, "left": "pull_request:41763", "reason": "Same model family, but one adds TimesFM 2.5 and the other fixes its MLP bias; these are distinct changes on different code paths.", "right": "pull_request:44325"}, {"accept": false, "left": "pull_request:44412", "reason": "Both are typing-related, but one adds type-check coverage for quantizers while the other adds a typing alias and updates modeling_utils; different scope and no shared bug.", "right": "pull_request:45425"}, {"accept": false, "left": "pull_request:43339", "reason": "Unrelated models and test failures; they do not fix the same bug or change the same concrete code path.", "right": "pull_request:43936"}, {"accept": false, "left": "pull_request:43999", "reason": "Both are output-tracing refactors, but for different models and implementations; not the same underlying change.", "right": "pull_request:44001"}, {"accept": false, "left": "pull_request:43999", "reason": "Different model and forward signature refactor; same broad theme only, not a single bug or mergeable change.", "right": "pull_request:44002"}, {"accept": false, "left": "pull_request:43999", "reason": "Both concern tracing/output handling, but they target different model code paths and are separate refactors.", "right": "pull_request:44025"}, {"accept": false, "left": "pull_request:43999", "reason": "Different model refactor with no shared bug; too broad a similarity to treat as duplicates.", "right": "pull_request:44026"}, 
{"accept": false, "left": "pull_request:43999", "reason": "Same high-level refactor theme, but distinct models and code paths; not one concrete change.", "right": "pull_request:44027"}, {"accept": false, "left": "pull_request:44330", "reason": "These are unrelated model changes: one removes cache_position across many models, the other changes Gemma4 shared weights/loading behavior.", "right": "pull_request:45336"}, {"accept": false, "left": "pull_request:44051", "reason": "Completely unrelated areas: Gemma3n forward type hints versus tokenizer-class mapping on the hub.", "right": "pull_request:44801"}], "summary": "This cluster is highly heterogeneous: it mixes unrelated model additions, bug fixes, typing/CI maintenance, and test updates. None of the soft edges looks like a true duplicate or a single change that could plausibly be merged into one PR."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43488|pull_request:43565", "pull_request:41763|pull_request:44325", "pull_request:44412|pull_request:45425", "pull_request:43339|pull_request:43936", "pull_request:43999|pull_request:44001", "pull_request:43999|pull_request:44002", "pull_request:43999|pull_request:44025", "pull_request:43999|pull_request:44026", "pull_request:43999|pull_request:44027", "pull_request:44330|pull_request:45336", "pull_request:44051|pull_request:44801"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 18686, "estimated_input_tokens": 9215, "item_count": 18, "node_count": 18, "serialized_chars": 36857, "soft_pair_count": 13}, "cached_at": "2026-04-14T13:24:31Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": 
"8b931e1f0b647f0b273d35de1fbfc63ce671e25411b048db147f61a8d1478ccb", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:42230", "pull_request:42802", "pull_request:43341", "pull_request:43699", "pull_request:43759", "pull_request:43907", "pull_request:43910", "pull_request:43936", "pull_request:44082", "pull_request:44296", "pull_request:44353", "pull_request:44426", "pull_request:44482", "pull_request:44502", "pull_request:45044", "pull_request:45049", "pull_request:45204", "pull_request:45214"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "45204 is the strongest representative PR: it addresses an actual runtime bug in the forward path, with a narrow code change and test coverage, unlike the test-only or metadata-oriented PRs.", "canonical_issue_reason": null, "canonical_pr_reason": "No true PR duplicate emerges from the candidates; if one PR must represent the group, 45204 is the cleanest standalone bug fix because it patches a concrete device-mismatch in model code and includes regression tests.", "confidence": 0.94, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:45204", "reason": "Different models and different failure modes: VideoMT device placement vs Cohere ASR beam-search test behavior.", "right": "pull_request:45214"}, {"accept": false, "left": "pull_request:44082", "reason": "Unrelated fixes: PatchTSMixer post-init handling vs RoPE kwargs conversion in configuration utils.", "right": "pull_request:45049"}, {"accept": false, "left": "pull_request:42802", "reason": "Different model families and bugs: LASR flex-attn support flag vs Cohere ASR beam-search test case.", "right": "pull_request:45214"}, {"accept": false, "left": "pull_request:43759", "reason": "Both touch XPU/determinism tests, but they are different models and independent expectation updates.", "right": "pull_request:44426"}, {"accept": false, "left": "pull_request:44296", "reason": "Docstring generation for configs is unrelated 
to the torch.mlu type-checker fix.", "right": "pull_request:44502"}, {"accept": false, "left": "pull_request:44353", "reason": "Different models and test updates; not the same bug or code path.", "right": "pull_request:44426"}, {"accept": false, "left": "pull_request:44426", "reason": "Both are XPU expectation updates, but for different model test suites with different outputs.", "right": "pull_request:44482"}, {"accept": false, "left": "pull_request:43341", "reason": "Unsupported-test skipping is a test harness change, not the same as altering tensor-parallel init backend usage.", "right": "pull_request:43699"}, {"accept": false, "left": "pull_request:43699", "reason": "Tensor-parallel backend selection and Jais2 expected outputs are unrelated changes.", "right": "pull_request:43910"}, {"accept": false, "left": "pull_request:43699", "reason": "Different issues: TP test backend choice vs Moonshine streaming device transfer bug.", "right": "pull_request:43936"}, {"accept": false, "left": "pull_request:43936", "reason": "Different model code paths and failures: Moonshine device mismatch vs Janus generation config handling.", "right": "pull_request:45044"}, {"accept": false, "left": "pull_request:43699", "reason": "Tensor-parallel backend setup is unrelated to GLM image test expectations.", "right": "pull_request:43907"}, {"accept": false, "left": "pull_request:42230", "reason": "Generic XPU device support in attention masks is not the same as Qwen2_5_VL test expectation updates.", "right": "pull_request:44426"}], "summary": "The set is heterogeneous; the soft edges are mostly superficial overlaps around XPU, determinism, tests, or device placement, not the same underlying bug/change. 
I reject all candidate duplicate links."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:45204|pull_request:45214", "pull_request:44082|pull_request:45049", "pull_request:42802|pull_request:45214", "pull_request:43759|pull_request:44426", "pull_request:44296|pull_request:44502", "pull_request:44353|pull_request:44426", "pull_request:44426|pull_request:44482", "pull_request:43341|pull_request:43699", "pull_request:43699|pull_request:43910", "pull_request:43699|pull_request:43936", "pull_request:43936|pull_request:45044", "pull_request:43699|pull_request:43907", "pull_request:42230|pull_request:44426"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 18350, "estimated_input_tokens": 9047, "item_count": 18, "node_count": 18, "serialized_chars": 36187, "soft_pair_count": 11}, "cached_at": "2026-04-14T13:24:52Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "2aa04f6ccca1dd7f6cb94d8a20d9d08a2cd4b545c9164fea2beac89d7903510f", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:42802", "pull_request:43341", "pull_request:43464", "pull_request:43555", "pull_request:43565", "pull_request:43592", "pull_request:43615", "pull_request:43823", "pull_request:43910", "pull_request:43920", "pull_request:43938", "pull_request:44321", "pull_request:44426", "pull_request:44428", "pull_request:45044", "pull_request:45061", "pull_request:45214", "pull_request:45261"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "No PR cleanly represents the whole cluster because the items are heterogeneous and unrelated beyond broad repository area.", 
"canonical_issue_reason": null, "canonical_pr_reason": "No single canonical PR is appropriate: the PRs target different models, tests, or workflows and do not represent one shared bug or change.", "confidence": 0.98, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44426", "reason": "Both adjust XPU-related expected values, but for different models and different test fixtures; not the same bug or patch.", "right": "pull_request:44428"}, {"accept": false, "left": "pull_request:42802", "reason": "Both skip unsupported tests, but for different models and different constraints; not mergeable as one fix.", "right": "pull_request:44321"}, {"accept": false, "left": "pull_request:43615", "reason": "Both update test expectations, but they cover different models and separate failure modes.", "right": "pull_request:43910"}, {"accept": false, "left": "pull_request:43341", "reason": "One skips unsupported glm_image tests; the other fixes Janus generation logic. Different code paths and bugs.", "right": "pull_request:45044"}, {"accept": false, "left": "pull_request:43910", "reason": "Jais2 expected-output update versus Janus generation bug fix; unrelated changes.", "right": "pull_request:45044"}, {"accept": false, "left": "pull_request:43341", "reason": "Unsupported-test skips for glm_image versus a cohere_asr beam-search/device fix; not the same underlying issue.", "right": "pull_request:45214"}, {"accept": false, "left": "pull_request:43555", "reason": "Both touch workflow permissions, but one changes the style bot workflow and the other alters repo-consistency workflow plus deletes a file; not one concrete fix.", "right": "pull_request:43565"}, {"accept": false, "left": "pull_request:42802", "reason": "Lasr flex-attn compatibility fix versus exaone_moe expected-value update; different models and problems.", "right": "pull_request:43938"}, {"accept": false, "left": "pull_request:43592", "reason": "Configuration default-value cleanup across existing models is 
unrelated to adding a new MobileLLM model package.", "right": "pull_request:43823"}, {"accept": false, "left": "pull_request:43464", "reason": "MarkupLM integration-test dtype change and CircleCI failure-summary workflow parsing are unrelated changes.", "right": "pull_request:45261"}, {"accept": false, "left": "pull_request:43920", "reason": "Tests-fetcher cache_utils trigger and flash-attn deprecation/backward-compatibility helper are unrelated maintenance changes.", "right": "pull_request:45061"}], "summary": "This cluster is not a duplicate set; it\u2019s a loose grouping of unrelated PRs across model fixes, test skips/expectations, and CI/workflow maintenance. None of the soft edges look like the same underlying change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44426|pull_request:44428", "pull_request:42802|pull_request:44321", "pull_request:43615|pull_request:43910", "pull_request:43341|pull_request:45044", "pull_request:43910|pull_request:45044", "pull_request:43341|pull_request:45214", "pull_request:43555|pull_request:43565", "pull_request:42802|pull_request:43938", "pull_request:43592|pull_request:43823", "pull_request:43464|pull_request:45261", "pull_request:43920|pull_request:45061"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 18986, "estimated_input_tokens": 9365, "item_count": 17, "node_count": 17, "serialized_chars": 37460, "soft_pair_count": 9}, "cached_at": "2026-04-14T13:25:20Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "d1e6fd573ef888cf07ddc3ee9a3750993bc9f077973eef43fc32beb73a765cbb", "prepared_review_unit_schema_version": "1.0"}, "nodes": 
["pull_request:39390", "pull_request:42230", "pull_request:42668", "pull_request:43341", "pull_request:43588", "pull_request:43592", "pull_request:43913", "pull_request:43956", "pull_request:44033", "pull_request:44126", "pull_request:44527", "pull_request:44602", "pull_request:44801", "pull_request:45032", "pull_request:45209", "pull_request:45261", "pull_request:45340"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 45340 is the best single representative of the cluster because it touches core shared conversion code across multiple model families; however, the overall cluster is not truly one duplicate group.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 45340 is the broadest and most central change in the set: it updates shared conversion-mapping logic plus loading code, has an inbound reference, and is more representative than the narrow test or model-specific patches.", "confidence": 0.87, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43592", "reason": "Both are model-support/meta fixes, but they target different model families and different problems: config defaults vs tokenizer-class registry/hub compatibility.", "right": "pull_request:44801"}, {"accept": false, "left": "pull_request:44527", "reason": "One fixes a MusicGen integration test, the other changes CircleCI failure-summary comment parsing. No shared code path or underlying bug.", "right": "pull_request:45261"}, {"accept": false, "left": "pull_request:42668", "reason": "Both touch conversion/processor auto-mapping, but they fix different subsystems and different model families; not the same concrete bug.", "right": "pull_request:45340"}, {"accept": false, "left": "pull_request:44126", "reason": "Both relate loosely to generation/cache behavior, but one refactors generation input prep while the other removes cache_position from many model forwards. 
Too broad and not the same fix.", "right": "pull_request:44602"}, {"accept": false, "left": "pull_request:43588", "reason": "Different model-specific changes in unrelated areas: Qwen3 Omni MoE video feature typing vs Cohere2-style config defaults.", "right": "pull_request:43592"}, {"accept": false, "left": "pull_request:43341", "reason": "Both are test-only adjustments, but they address different models and different execution assumptions. Not the same underlying issue.", "right": "pull_request:45209"}, {"accept": true, "left": "pull_request:43913", "reason": "Both fix the same qwen3_vl_moe weight-mapping/conversion path. The implementations differ, but they are clearly alternative ways to resolve the same concrete checkpoint conversion bug.", "right": "pull_request:43956"}, {"accept": false, "left": "pull_request:39390", "reason": "One is a ShieldGemma2 test CI workaround; the other is an attention-mask behavior fix for xpu. Different code paths and failure modes.", "right": "pull_request:42230"}, {"accept": false, "left": "pull_request:44033", "reason": "Both are CI-related, but one improves PR-comment handling and the other parallelizes failed-test checking. They do not fix the same bug or change.", "right": "pull_request:45032"}], "summary": "Mostly a heterogeneous set of unrelated PRs. 
The only soft edge that looks like the same underlying fix is the qwen3_vl_moe checkpoint-conversion mapping pair; the rest are different models, tests, CI workflows, or unrelated infrastructure changes."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43592|pull_request:44801", "pull_request:44527|pull_request:45261", "pull_request:42668|pull_request:45340", "pull_request:44126|pull_request:44602", "pull_request:43588|pull_request:43592", "pull_request:43341|pull_request:45209", "pull_request:43913|pull_request:43956", "pull_request:39390|pull_request:42230", "pull_request:44033|pull_request:45032"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 21708, "estimated_input_tokens": 10726, "item_count": 18, "node_count": 18, "serialized_chars": 42902, "soft_pair_count": 9}, "cached_at": "2026-04-14T13:25:50Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "266d2e4631f7f4d5a963cace94b075156d8d229e6eef4b1889d6a44b1200c96c", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:42802", "pull_request:42848", "pull_request:43324", "pull_request:43400", "pull_request:43579", "pull_request:43710", "pull_request:43759", "pull_request:44157", "pull_request:44353", "pull_request:44414", "pull_request:44600", "pull_request:44647", "pull_request:44657", "pull_request:44801", "pull_request:44833", "pull_request:44834", "pull_request:45212", "pull_request:45340"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 42848 is the best representative anchor if one PR must be chosen, because it is the highest-impact infrastructure change in the cluster rather 
than a narrow test or docs tweak.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 42848 is the broadest and most central item in the set, with the largest cross-model attention-mask refactor and the most discussion/inbound references; however, it is still not a duplicate of the others.", "confidence": 0.98, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44647", "reason": "Continuous batching/device support work vs an Electra test-only BF16 config fix; different codepaths and goals.", "right": "pull_request:44657"}, {"accept": false, "left": "pull_request:44414", "reason": "Model-loading tqdm verbosity cleanup is unrelated to tokenizer class mapping on the hub.", "right": "pull_request:44801"}, {"accept": false, "left": "pull_request:43400", "reason": "Adding a new acoustic tokenizer and editing docs for GLM-OCR/EomT-DINOv3 are distinct changes with no shared bug.", "right": "pull_request:43710"}, {"accept": false, "left": "pull_request:42848", "reason": "42848 is a broad attention-mask interface migration; 44157 is a specific packed-input mask fix for Qwen-VL. 
Related theme, but not the same concrete change.", "right": "pull_request:44157"}, {"accept": false, "left": "pull_request:44833", "reason": "Both touch modular examples, but one is a converter rerun/fix while the other updates example code; not the same underlying change.", "right": "pull_request:44834"}, {"accept": false, "left": "pull_request:42802", "reason": "LASR flex-attention support change and Minimax-M2 XPU test expectations are unrelated.", "right": "pull_request:43324"}, {"accept": false, "left": "pull_request:43579", "reason": "Separate XPU test additions for different models; same general maintenance theme, but not a duplicate bug or change.", "right": "pull_request:45212"}, {"accept": false, "left": "pull_request:44600", "reason": "A no-op assignment removal in Paligemma is unrelated to conversion-mapping fixes for VLMs.", "right": "pull_request:45340"}, {"accept": false, "left": "pull_request:43759", "reason": "Determinism decorators for Youtu tests and XPU expectations for OLMo Hybrid are different test fixes for different models.", "right": "pull_request:44353"}], "summary": "This cluster is mostly unrelated PRs spanning docs, tests, model tweaks, and utility refactors. 
The soft-similarity links are all false positives; none of the PRs look like the same concrete bug or change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44647|pull_request:44657", "pull_request:44414|pull_request:44801", "pull_request:43400|pull_request:43710", "pull_request:42848|pull_request:44157", "pull_request:44833|pull_request:44834", "pull_request:42802|pull_request:43324", "pull_request:43579|pull_request:45212", "pull_request:44600|pull_request:45340", "pull_request:43759|pull_request:44353"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 17802, "estimated_input_tokens": 8773, "item_count": 17, "node_count": 17, "serialized_chars": 35090, "soft_pair_count": 10}, "cached_at": "2026-04-14T13:26:16Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "2d734ca5c64a8a0b089eeea2b8ae7ce5aab658f0608dc29093a7011e632692b7", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:42230", "pull_request:43178", "pull_request:43614", "pull_request:43759", "pull_request:43913", "pull_request:43953", "pull_request:43989", "pull_request:44125", "pull_request:44647", "pull_request:44657", "pull_request:44733", "pull_request:45033", "pull_request:45044", "pull_request:45061", "pull_request:45204", "pull_request:45209", "pull_request:45284"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "No single PR cleanly represents the cluster because the items are not duplicates; they span unrelated fixes in attention masking, video processors, generation, tests, and model conversion. 
If forced to pick the closest duplicate pair, the Qwen3VL-MoE conversion-mapping PRs are the only plausible match.", "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.93, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44657", "reason": "Both are test-only expectation adjustments, but for different models and different failure modes; they do not fix the same concrete bug.", "right": "pull_request:44733"}, {"accept": false, "left": "pull_request:44647", "reason": "Different code paths and products: continuous batching device support vs. VideomT device placement bug.", "right": "pull_request:45204"}, {"accept": false, "left": "pull_request:44647", "reason": "These are unrelated: continuous batching runtime changes versus a Nomic-BERT test expectation update.", "right": "pull_request:45209"}, {"accept": false, "left": "pull_request:43614", "reason": "Different models and problems: DiffLlama XPU contiguity for SDPA vs. Youtu deterministic test gating.", "right": "pull_request:43759"}, {"accept": false, "left": "pull_request:42230", "reason": "Both mention XPU/device behavior, but they address different subsystems and distinct bugs.", "right": "pull_request:44647"}, {"accept": false, "left": "pull_request:43178", "reason": "AMD pipeline CI fixes are not the same change as Qwen2 expectation updates; they touch different models and concerns.", "right": "pull_request:45284"}, {"accept": true, "left": "pull_request:43913", "reason": "Both are about fixing Qwen3VL-MoE checkpoint conversion/mapping. They target the same underlying conversion bug and could plausibly be consolidated into one PR.", "right": "pull_request:43953"}, {"accept": false, "left": "pull_request:45033", "reason": "One adds backward compatibility for a renamed image-processing helper; the other adds a flash-attn version deprecation shim. 
Different APIs, different bugs.", "right": "pull_request:45061"}, {"accept": false, "left": "pull_request:43989", "reason": "Both touch video-processing auto code, but one fixes None entries when torchvision is unavailable while the other refactors class lookup/error handling; these are not the same concrete issue.", "right": "pull_request:44125"}, {"accept": false, "left": "pull_request:44657", "reason": "Different models and different fixes: Electra test BF16 stability vs. VideomT device mismatch in model forward.", "right": "pull_request:45204"}], "summary": "This cluster is mostly a grab bag of unrelated PRs across different models and subsystems. Only the two Qwen3VL-MoE conversion-mapping PRs appear to address the same underlying checkpoint-conversion bug."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44657|pull_request:44733", "pull_request:44647|pull_request:45204", "pull_request:44647|pull_request:45209", "pull_request:43614|pull_request:43759", "pull_request:42230|pull_request:44647", "pull_request:43178|pull_request:45284", "pull_request:43913|pull_request:43953", "pull_request:45033|pull_request:45061", "pull_request:43989|pull_request:44125", "pull_request:44657|pull_request:45044"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 18610, "estimated_input_tokens": 9177, "item_count": 17, "node_count": 17, "serialized_chars": 36706, "soft_pair_count": 10}, "cached_at": "2026-04-14T13:26:49Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "75fb7689c3b65e496305294412bab2f31b8ae530f0960dd8d149290c1f0d428e", "prepared_review_unit_schema_version": "1.0"}, "nodes": 
["pull_request:42668", "pull_request:43339", "pull_request:43579", "pull_request:43710", "pull_request:43712", "pull_request:43758", "pull_request:43759", "pull_request:43913", "pull_request:43920", "pull_request:43936", "pull_request:44426", "pull_request:44428", "pull_request:44482", "pull_request:44653", "pull_request:44662", "pull_request:44801", "pull_request:45061"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "pull_request:44662 is the largest substantive change in the set (a full PenguinVL implementation) and is the most representative single artifact, but it is not a duplicate of the other PRs.", "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43936", "reason": "Different fixes: moonshine_streaming device placement in model forward vs AutoImageProcessor local-implementation detection. Separate code paths and not mergeable as one PR.", "right": "pull_request:44653"}, {"accept": false, "left": "pull_request:43758", "reason": "Both stabilize test outputs, but for different models and different failure modes (eomt_dinov3 vs qwen2_5_vl). 
Same pattern, not the same underlying bug.", "right": "pull_request:44426"}, {"accept": false, "left": "pull_request:43759", "reason": "Both add deterministic/XPU expectations, but they target different model test suites (youtu vs higgs_audio_v2) with unrelated outputs.", "right": "pull_request:44482"}, {"accept": false, "left": "pull_request:43712", "reason": "Completely different changes: typing/comment cleanup in modeling_utils vs flash-attn compatibility/deprecation helper in utils.", "right": "pull_request:45061"}, {"accept": false, "left": "pull_request:43712", "reason": "modeling_utils typing cleanup and tests_fetcher cache_utils trigger changes are unrelated maintenance tasks.", "right": "pull_request:43920"}, {"accept": false, "left": "pull_request:42668", "reason": "Both touch auto tokenization plumbing, but one fixes AudioFlamingo3 processor registration while the other updates tokenizer mappings for DeepSeek/ModernBERT. Different models and issues.", "right": "pull_request:44801"}, {"accept": false, "left": "pull_request:43339", "reason": "Both are test expectation updates, but for different models (lw_detr vs solar_open) and different outputs. Not the same bug.", "right": "pull_request:43579"}, {"accept": false, "left": "pull_request:43759", "reason": "Both stabilize tests with XPU expectations, but one is Youtu LLM and the other is VibeVoice acoustic tokenizer. 
No shared concrete defect.", "right": "pull_request:44428"}, {"accept": false, "left": "pull_request:43712", "reason": "General model property typing cleanup is unrelated to qwen3_vl_moe checkpoint conversion/transpose sentinel logic.", "right": "pull_request:43913"}, {"accept": false, "left": "pull_request:43710", "reason": "Docs-only GLM-OCR/EomT-DINOv3 updates are unrelated to the PenguinVL implementation and supporting auto/model files.", "right": "pull_request:44662"}], "summary": "This cluster is a grab bag of unrelated maintenance PRs: model implementation, docs, tokenization mapping, cache/test infrastructure, and per-model XPU expectation updates. The similarity is mostly superficial (test stabilization or Transformers plumbing), not a shared underlying bug or change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43936|pull_request:44653", "pull_request:43758|pull_request:44426", "pull_request:43759|pull_request:44482", "pull_request:43712|pull_request:45061", "pull_request:43712|pull_request:43920", "pull_request:42668|pull_request:44801", "pull_request:43339|pull_request:43579", "pull_request:43759|pull_request:44428", "pull_request:43712|pull_request:43913", "pull_request:43710|pull_request:44662"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 21810, "estimated_input_tokens": 10777, "item_count": 18, "node_count": 18, "serialized_chars": 43106, "soft_pair_count": 10}, "cached_at": "2026-04-14T13:27:20Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "dd8a7aeed9d46d601a16a83907091cf3c45e7e8b25d3367c4f432938dfd39382", "prepared_review_unit_schema_version": "1.0"}, 
"nodes": ["pull_request:42668", "pull_request:43400", "pull_request:43553", "pull_request:43565", "pull_request:43758", "pull_request:43759", "pull_request:43787", "pull_request:43930", "pull_request:44125", "pull_request:44286", "pull_request:44490", "pull_request:44601", "pull_request:44653", "pull_request:44808", "pull_request:44828", "pull_request:45209", "pull_request:45212", "pull_request:45336"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 44808 is the strongest standalone representative if one PR must be picked: it is a large, coherent merged feature addition with substantial implementation and test coverage, unlike the smaller one-off fixes in the rest of the set.", "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44601", "reason": "Different changes: native pipeline-parallel loading support vs adding PP-OCRv5 model support. They touch unrelated code paths and could not plausibly be merged as one fix.", "right": "pull_request:44808"}, {"accept": false, "left": "pull_request:43758", "reason": "Both stabilize tests, but for different models and different failure modes. Same pattern, not the same underlying bug.", "right": "pull_request:45209"}, {"accept": false, "left": "pull_request:44490", "reason": "EuroBERT model-parallel masking fix is unrelated to AutoImageProcessor local implementation detection. Different subsystems and code paths.", "right": "pull_request:44653"}, {"accept": false, "left": "pull_request:42668", "reason": "Both involve auto-loading helpers, but one is processor-from-pretrained robustness and the other is video processor error handling. 
Different concrete bugs.", "right": "pull_request:44125"}, {"accept": false, "left": "pull_request:43758", "reason": "Both add deterministic XPU guards to tests, but they are for different model test suites and different outputs.", "right": "pull_request:43759"}, {"accept": false, "left": "pull_request:43787", "reason": "PT 2.4 cleanup and autocast handling improvements are separate code changes with different motivations and affected files.", "right": "pull_request:43930"}, {"accept": false, "left": "pull_request:44828", "reason": "Removing cache_position in many models is unrelated to Gemma4 shared-weight loading behavior. Different model internals and bug reports.", "right": "pull_request:45336"}, {"accept": false, "left": "pull_request:43400", "reason": "VibeVoice acoustic tokenizer support and Fouroversix model support are distinct feature additions for different architectures.", "right": "pull_request:44286"}, {"accept": false, "left": "pull_request:43758", "reason": "Reiterated: same stabilization style, but entirely different model tests and expected outputs, so not duplicates.", "right": "pull_request:43759"}, {"accept": false, "left": "pull_request:43553", "reason": "Both touch bot workflows, but one adds a style-bot workflow and the other edits repo-consistency permissions plus deletes/reworks the workflow. Related area, not the same PR-level change.", "right": "pull_request:43565"}], "summary": "This cluster is heterogeneous: the PRs span unrelated model additions, test tweaks, workflow fixes, and infrastructure changes. 
None of the suggested soft links appear to be the same underlying bug or feature change, so all should be rejected."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44601|pull_request:44808", "pull_request:43758|pull_request:45209", "pull_request:44490|pull_request:44653", "pull_request:42668|pull_request:44125", "pull_request:43758|pull_request:45212", "pull_request:43787|pull_request:43930", "pull_request:44828|pull_request:45336", "pull_request:43400|pull_request:44286", "pull_request:43758|pull_request:43759", "pull_request:43553|pull_request:43565"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 18466, "estimated_input_tokens": 9105, "item_count": 17, "node_count": 17, "serialized_chars": 36420, "soft_pair_count": 9}, "cached_at": "2026-04-14T13:27:37Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "69eda706cee08a90c81c6f458dda2035550711b94e3816ca14f7878aecd8f4da", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43324", "pull_request:43500", "pull_request:43554", "pull_request:43555", "pull_request:43565", "pull_request:43579", "pull_request:43710", "pull_request:43712", "pull_request:43913", "pull_request:43916", "pull_request:44236", "pull_request:44330", "pull_request:44414", "pull_request:44433", "pull_request:44497", "pull_request:44544", "pull_request:45044"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "44433 is the strongest standalone representative: it fixes a specific failure mode with broad model coverage and accompanying tests, unlike the more incidental doc/workflow/typing changes.", "canonical_issue_reason": 
null, "canonical_pr_reason": "No true duplicate cluster emerges; if one PR must represent the set, 44433 is the clearest substantive bug fix because it addresses a concrete multimodal runtime error and updates several affected models/tests.", "confidence": 0.91, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43710", "reason": "Different workstreams: docs/model-card edits versus removing cache_position in generation code. Same repo area, but not the same bug or change.", "right": "pull_request:44330"}, {"accept": false, "left": "pull_request:44497", "reason": "Unrelated code paths: tie-weights behavior in model saving/loading versus ANSI styling in loading reports.", "right": "pull_request:44544"}, {"accept": false, "left": "pull_request:43324", "reason": "Both are XPU-related test adjustments, but they target different models and different expected outputs, so they are not the same underlying fix.", "right": "pull_request:43579"}, {"accept": false, "left": "pull_request:44236", "reason": "Separate concerns: DeepSpeed/zero3 init config versus tqdm verbosity in model loading. They could coexist, but they are not duplicate changes.", "right": "pull_request:44414"}, {"accept": false, "left": "pull_request:43554", "reason": "Both touch workflow/bot permissions, but one adjusts style bot behavior and the other changes repo-consistency permissions and deletes a workflow. Related, not the same concrete change.", "right": "pull_request:43565"}, {"accept": false, "left": "pull_request:43913", "reason": "Different mechanics and targets: qwen3_vl_moe weight-mapping/sentinel logic versus tie-weight symmetry in model saving.", "right": "pull_request:44497"}, {"accept": false, "left": "pull_request:43500", "reason": "The first is a permission-check/test PR with placeholder edits; the second is an actual workflow permission/config fix. 
Same theme, different artifact purpose and change.", "right": "pull_request:43555"}, {"accept": false, "left": "pull_request:44433", "reason": "Both are multimodal generation fixes, but they affect different models and different failure modes, so they are not the same bug.", "right": "pull_request:45044"}, {"accept": false, "left": "pull_request:43712", "reason": "Typing/documentation cleanup in model base utilities is unrelated to the `input_embeds`/`inputs_embeds` renaming and generation behavior fix.", "right": "pull_request:43916"}], "summary": "The set is mostly unrelated PRs across model tests, workflow bot permissions, docs, typing cleanup, and a few concrete bug fixes. No soft pair looks like the same underlying change closely enough to merge for duplicate triage."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43710|pull_request:44330", "pull_request:44497|pull_request:44544", "pull_request:43324|pull_request:43579", "pull_request:44236|pull_request:44414", "pull_request:43554|pull_request:43565", "pull_request:43913|pull_request:44497", "pull_request:43500|pull_request:43555", "pull_request:44433|pull_request:45044", "pull_request:43712|pull_request:43916"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 19562, "estimated_input_tokens": 9653, "item_count": 17, "node_count": 17, "serialized_chars": 38611, "soft_pair_count": 11}, "cached_at": "2026-04-14T13:28:01Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "85b13a2ab69380dabee4c911c8f2c572009fed6bfe874fb43eb7fdf74a3539f9", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:42186", 
"pull_request:43341", "pull_request:43486", "pull_request:43710", "pull_request:43778", "pull_request:43910", "pull_request:43913", "pull_request:43936", "pull_request:44236", "pull_request:44353", "pull_request:44497", "pull_request:44828", "pull_request:45061", "pull_request:45204", "pull_request:45336", "pull_request:45340", "pull_request:45407"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "No single PR is a good global representative because the cluster is heterogeneous and lacks a shared bug/change.", "canonical_issue_reason": null, "canonical_pr_reason": "No canonical PR exists; the items are not duplicates and cover unrelated changes across many subsystems.", "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44353", "reason": "Different topics: OLMO-Hybrid XPU test expectations vs Trainer DataParallel handling for 4-bit quantized models.", "right": "pull_request:45407"}, {"accept": false, "left": "pull_request:43486", "reason": "Unrelated fixes: batched video input handling vs tied-weight aliasing logic in state dict loading.", "right": "pull_request:44497"}, {"accept": false, "left": "pull_request:43778", "reason": "Mamba/FalconMamba weight initialization is unrelated to DeepSpeed ZeRO-3 init configuration.", "right": "pull_request:44236"}, {"accept": false, "left": "pull_request:42186", "reason": "Attention mask slicing in eager attention is unrelated to Gemma4 shared-weight removal/loading behavior.", "right": "pull_request:45336"}, {"accept": false, "left": "pull_request:43913", "reason": "Qwen3-VL-MoE weight mapping/conversion is unrelated to flash-attn compatibility deprecation helpers.", "right": "pull_request:45061"}, {"accept": false, "left": "pull_request:44236", "reason": "DeepSpeed ZeRO-3 init config and VLM conversion mappings are different code paths and different bugs.", "right": "pull_request:45340"}, {"accept": false, "left": "pull_request:43710", "reason": "Docs 
additions/fixes are unrelated to removing cache_position handling across model forward paths.", "right": "pull_request:44828"}, {"accept": false, "left": "pull_request:43341", "reason": "Skipping unsupported glm_image tests is unrelated to quantized-model DataParallel wrapping.", "right": "pull_request:45407"}, {"accept": false, "left": "pull_request:43910", "reason": "Jais2 expected-output test updates are unrelated to 4-bit DP wrapping behavior.", "right": "pull_request:45407"}, {"accept": false, "left": "pull_request:43936", "reason": "Moonshine streaming device-mismatch fix is a different model-specific bug than Trainer DataParallel wrapping.", "right": "pull_request:45407"}, {"accept": false, "left": "pull_request:45204", "reason": "VideoMT device placement fixes are unrelated to Trainer wrapping quantized models in DataParallel.", "right": "pull_request:45407"}], "summary": "This cluster is a false-positive mix: the PRs span unrelated model fixes, test updates, docs, conversion mapping, and utility changes. 
None of the soft-edge pairs look like the same underlying bug or change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44353|pull_request:45407", "pull_request:43486|pull_request:44497", "pull_request:43778|pull_request:44236", "pull_request:42186|pull_request:45336", "pull_request:43913|pull_request:45061", "pull_request:44236|pull_request:45340", "pull_request:43710|pull_request:44828", "pull_request:43341|pull_request:45407", "pull_request:43910|pull_request:45407", "pull_request:43936|pull_request:45407", "pull_request:45204|pull_request:45407"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 20020, "estimated_input_tokens": 9882, "item_count": 17, "node_count": 17, "serialized_chars": 39525, "soft_pair_count": 9}, "cached_at": "2026-04-14T13:28:20Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "67526e18fb03c102801dbd0483d10fa955184eac1c629fd7e209752922155928", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:42186", "pull_request:43030", "pull_request:43424", "pull_request:43445", "pull_request:43579", "pull_request:43712", "pull_request:43942", "pull_request:43995", "pull_request:44037", "pull_request:44051", "pull_request:44235", "pull_request:44300", "pull_request:44330", "pull_request:44334", "pull_request:44414", "pull_request:45190", "pull_request:45214"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 44300 is the strongest single representative for the cluster\u2019s loose theme of internal transformer plumbing/model-loading changes, though it still isn\u2019t a duplicate of the others.", 
"canonical_issue_reason": null, "canonical_pr_reason": "PR 44300 is the most central representative by sheer scope and discussion: it touches many model/test files and is a substantial infrastructure change, unlike the smaller one-off fixes around it.", "confidence": 0.19, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44037", "reason": "Different domains and code paths: qwen3_vl_moe weight mapping vs CLI type-casting cleanup. No shared underlying bug or change.", "right": "pull_request:45190"}, {"accept": false, "left": "pull_request:43030", "reason": "Both are refactors around model forward interfaces, but they target different models and different behavior; not the same concrete fix.", "right": "pull_request:43995"}, {"accept": false, "left": "pull_request:43445", "reason": "Both involve MoE/conversion logic, but one fixes specific router mappings while the other changes recursive weight conversion behavior across many models. Not mergeable as one duplicate PR.", "right": "pull_request:44300"}, {"accept": false, "left": "pull_request:44051", "reason": "Incorrect forward type hint in Gemma3n vs tokenizer-class registration for Fuyu are unrelated changes.", "right": "pull_request:44235"}, {"accept": false, "left": "pull_request:43712", "reason": "General model property typing cleanup and tqdm verbosity reduction are unrelated.", "right": "pull_request:44414"}, {"accept": false, "left": "pull_request:43579", "reason": "XPU test support for solar_open and a Cohere ASR device-placement bug fix are different models and different issues.", "right": "pull_request:45214"}, {"accept": false, "left": "pull_request:42186", "reason": "Both touch attention/caching across many models, but one removes mask slicing while the other removes cache_position usage. 
Different underlying bugs.", "right": "pull_request:44330"}, {"accept": false, "left": "pull_request:44334", "reason": "Cookiecutter CLI fix vs broader CLI typing refactor; related file, but not the same defect or change.", "right": "pull_request:45190"}, {"accept": false, "left": "pull_request:43424", "reason": "Executorch dynamic-shape export test and Bark test fixes are unrelated test additions in different subsystems.", "right": "pull_request:43942"}], "summary": "This cluster is not a true duplicate set: it contains a grab bag of unrelated model changes, test tweaks, typing cleanups, and conversion-mapping refactors. The only weak common thread is general transformer internals/model plumbing, but the concrete code paths differ too much to merge as duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44037|pull_request:45190", "pull_request:43030|pull_request:43995", "pull_request:43445|pull_request:44300", "pull_request:44051|pull_request:44235", "pull_request:43712|pull_request:44414", "pull_request:43579|pull_request:45214", "pull_request:42186|pull_request:44330", "pull_request:44334|pull_request:45190", "pull_request:43424|pull_request:43942"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 22580, "estimated_input_tokens": 11162, "item_count": 18, "node_count": 18, "serialized_chars": 44646, "soft_pair_count": 11}, "cached_at": "2026-04-14T13:28:57Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "8053efcf0b4d78e9f530a0f08e4bf764b51e8bf258d29312a6ad4234f03d1db8", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:42823", "pull_request:43098", 
"pull_request:43178", "pull_request:43247", "pull_request:43274", "pull_request:43345", "pull_request:43578", "pull_request:43690", "pull_request:43919", "pull_request:43942", "pull_request:43953", "pull_request:44033", "pull_request:44037", "pull_request:44082", "pull_request:44601", "pull_request:44833", "pull_request:45190", "pull_request:45340"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 44037 is the strongest single PR in the only likely duplicate pair, because it addresses the concrete conversion path with supporting code changes and tests; the other PRs are not close duplicates of each other.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 44037 is the best canonical representative if one is needed: it is the more complete qwen3_vl_moe conversion fix, adds the transpose dim-check mechanism, updates loading logic, and includes tests. PR 43953 looks like a narrower precursor in the same area.", "confidence": 0.74, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43690", "reason": "Custom-model notebook/repl crash fix vs modular converter cleanup; different bugs and code paths.", "right": "pull_request:44833"}, {"accept": false, "left": "pull_request:43098", "reason": "PP-DocLayoutV3 model support vs native pipeline-parallel loading support; unrelated changes.", "right": "pull_request:44601"}, {"accept": false, "left": "pull_request:43247", "reason": "PP-OCRv5 mobile detector support vs distributed PP loading infrastructure; not the same fix.", "right": "pull_request:44601"}, {"accept": false, "left": "pull_request:43274", "reason": "PP-OCRv5 server detector support vs pipeline-parallel core loading; different model additions.", "right": "pull_request:44601"}, {"accept": false, "left": "pull_request:43345", "reason": "PP-LCNet model support vs distributed pipeline-parallel support; too broad to be duplicates.", "right": "pull_request:44601"}, {"accept": false, "left": "pull_request:42823", 
"reason": "LASR integration test/tokenizer fix vs Youtu repo alignment in tests; unrelated models and issues.", "right": "pull_request:43578"}, {"accept": false, "left": "pull_request:43942", "reason": "Bark test update vs modular converter regeneration; different purposes and artifacts.", "right": "pull_request:44833"}, {"accept": false, "left": "pull_request:45190", "reason": "CLI typing cleanup vs conversion-mapping updates for VLMs; same repo area but not the same underlying change.", "right": "pull_request:45340"}, {"accept": false, "left": "pull_request:43178", "reason": "AMD pipeline CI fix vs comment-CI workflow messaging change; distinct CI problems.", "right": "pull_request:44033"}, {"accept": false, "left": "pull_request:43919", "reason": "Gradient sync behavior in trainer vs PatchTSMixer post_init handling; separate code paths.", "right": "pull_request:44082"}, {"accept": true, "left": "pull_request:43953", "reason": "Both target qwen3_vl_moe conversion-mapping bugs in the same file; 44037 is the more complete implementation and plausibly subsumes the narrower mapping fix in 43953.", "right": "pull_request:44037"}], "summary": "This cluster is mostly a mix of unrelated model-support PRs and small CI/test/infra fixes. 
Only the qwen3_vl_moe conversion-mapping pair looks like it may be the same underlying fix; the rest are too far apart to merge as duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43690|pull_request:44833", "pull_request:43098|pull_request:44601", "pull_request:43247|pull_request:44601", "pull_request:43274|pull_request:44601", "pull_request:43345|pull_request:44601", "pull_request:42823|pull_request:43578", "pull_request:43942|pull_request:44833", "pull_request:45190|pull_request:45340", "pull_request:43178|pull_request:44033", "pull_request:43919|pull_request:44082", "pull_request:43953|pull_request:44037"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 21062, "estimated_input_tokens": 10403, "item_count": 18, "node_count": 18, "serialized_chars": 41609, "soft_pair_count": 10}, "cached_at": "2026-04-14T13:29:31Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "58288c9fe56481cd1e60652966e7d7f3a538b29956bae8cf68f40f6e589a44a0", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:40755", "pull_request:41763", "pull_request:43030", "pull_request:43326", "pull_request:43445", "pull_request:43486", "pull_request:43683", "pull_request:43851", "pull_request:43913", "pull_request:44126", "pull_request:44236", "pull_request:44433", "pull_request:44544", "pull_request:44770", "pull_request:44943", "pull_request:45190", "pull_request:45340", "pull_request:45429"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 41763 is the strongest single representative only because it is a large, merged TimesFM update and overlaps the TimesFM 
codebase with 40755; however, it is not a duplicate of the other items, so it is only a weak cluster anchor.", "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.98, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43683", "reason": "Different code paths and bugs: forced fp32 loading in core model loading vs input prep simplification in generation.", "right": "pull_request:44126"}, {"accept": false, "left": "pull_request:44236", "reason": "Unrelated changes: ZeRO-3 init config/loading behavior vs CLI typing/casting fixes.", "right": "pull_request:45190"}, {"accept": false, "left": "pull_request:43030", "reason": "Same broad ML library, but different model families and changes: TimesFM covariates feature vs MoE router remapping/fixes.", "right": "pull_request:43445"}, {"accept": false, "left": "pull_request:44770", "reason": "Both touch configs, but one fixes strict config construction across models while the other adjusts type hints and rope validation.", "right": "pull_request:44943"}, {"accept": false, "left": "pull_request:43851", "reason": "Different workflow files and purposes; only a superficial CI/workflow similarity.", "right": "pull_request:45429"}, {"accept": false, "left": "pull_request:43913", "reason": "Weight mapping/transpose sentinel logic vs terminal ANSI styling in loading reports; no shared bug or change.", "right": "pull_request:44544"}, {"accept": false, "left": "pull_request:43486", "reason": "Video batching shape handling vs MoE weight-transpose mapping; unrelated subsystems.", "right": "pull_request:43913"}, {"accept": false, "left": "pull_request:44433", "reason": "Multimodal token-id validation/error handling vs checkpoint conversion mapping updates; different concerns.", "right": "pull_request:45340"}, {"accept": false, "left": "pull_request:40755", "reason": "Both TimesFM-related, but one adds covariate forecasting and the other introduces TimesFM 2.5; related model family, not the same concrete 
change.", "right": "pull_request:41763"}, {"accept": false, "left": "pull_request:43326", "reason": "MXFP4 dequantization fix vs loading report ANSI formatting fix; unrelated code paths.", "right": "pull_request:44544"}], "summary": "This cluster is heterogeneous: the pairs mostly span unrelated subsystems and change types. The only related area is TimesFM, but even there the PRs cover different changes (covariates vs a new TimesFM 2.5 model), so none should be merged as duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43683|pull_request:44126", "pull_request:44236|pull_request:45190", "pull_request:43030|pull_request:43445", "pull_request:44770|pull_request:44943", "pull_request:43851|pull_request:45429", "pull_request:43913|pull_request:44544", "pull_request:43486|pull_request:43913", "pull_request:44433|pull_request:45340", "pull_request:40755|pull_request:41763", "pull_request:43326|pull_request:44544"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 20498, "estimated_input_tokens": 10121, "item_count": 18, "node_count": 18, "serialized_chars": 40481, "soft_pair_count": 11}, "cached_at": "2026-04-14T13:29:57Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "4a9c3731d898223e519c1f214670e6b01545cc7535c25738cd2ba00344c2a152", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:41722", "pull_request:42028", "pull_request:42186", "pull_request:43326", "pull_request:43486", "pull_request:43683", "pull_request:43823", "pull_request:43916", "pull_request:43920", "pull_request:43953", "pull_request:44037", "pull_request:44126", "pull_request:44251", 
"pull_request:44502", "pull_request:44544", "pull_request:45061", "pull_request:45078", "pull_request:45340"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "No single best PR as a duplicate representative; even the strongest-looking items (e.g. conversion mapping or generation changes) address distinct code paths and should not be merged together.", "canonical_issue_reason": null, "canonical_pr_reason": "No canonical PR: the set is heterogeneous, spanning docs, model additions, generation, loading, conversion mapping, and utility fixes that do not describe the same change.", "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43683", "reason": "Forced-fp32 loading vs a type-checker guard in import_utils are unrelated bugs in different code paths.", "right": "pull_request:44502"}, {"accept": false, "left": "pull_request:43683", "reason": "Model loading/upcasting fix and loading-report ANSI styling fix are separate utilities with no shared underlying issue.", "right": "pull_request:44544"}, {"accept": false, "left": "pull_request:43326", "reason": "mxfp4 dequantize and 5D video batching fixes touch unrelated subsystems and do not form one concrete change.", "right": "pull_request:43486"}, {"accept": false, "left": "pull_request:42186", "reason": "Eager attention mask slicing and bf16 MLU availability are unrelated problems despite both being small utility fixes.", "right": "pull_request:44502"}, {"accept": false, "left": "pull_request:43916", "reason": "One fixes generation input/embed handling; the other only updates test discovery for cache_utils. 
Not the same bug or change.", "right": "pull_request:43920"}, {"accept": false, "left": "pull_request:44037", "reason": "Qwen3-VL-MoE weight mapping and flash-attn version compatibility/deprecation are unrelated changes.", "right": "pull_request:45061"}, {"accept": false, "left": "pull_request:43953", "reason": "They both touch conversion_mapping.py, but they target different model families and different mapping logic; not one mergeable fix.", "right": "pull_request:45340"}, {"accept": false, "left": "pull_request:41722", "reason": "Tamil README docs and MiniMax-M2 model support are completely separate additions.", "right": "pull_request:42028"}, {"accept": false, "left": "pull_request:43486", "reason": "Video batching bugfix and generate input-preparation simplification are different code paths and different failure modes.", "right": "pull_request:44126"}, {"accept": false, "left": "pull_request:45078", "reason": "Tokenizer conversion-error handling and vlm conversion mappings are distinct areas with no shared concrete bug.", "right": "pull_request:45340"}, {"accept": false, "left": "pull_request:43823", "reason": "MobileLLM and Jina-Embeddings-V3 are separate model additions, not duplicates or one underlying change.", "right": "pull_request:44251"}], "summary": "The soft-paired PRs are mostly unrelated and should remain separate; a few share filenames or subsystems, but the underlying bugs/changes differ. 
No true duplicate cluster emerges here."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43683|pull_request:44502", "pull_request:43683|pull_request:44544", "pull_request:43326|pull_request:43486", "pull_request:42186|pull_request:44502", "pull_request:43916|pull_request:43920", "pull_request:44037|pull_request:45061", "pull_request:43953|pull_request:45340", "pull_request:41722|pull_request:42028", "pull_request:43486|pull_request:44126", "pull_request:45078|pull_request:45340", "pull_request:43823|pull_request:44251"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 19838, "estimated_input_tokens": 9791, "item_count": 17, "node_count": 17, "serialized_chars": 39161, "soft_pair_count": 9}, "cached_at": "2026-04-14T13:30:19Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "449e4438b9d6316aa012eee9c59826c9eeaf453c3675b80bf77dbde3543fc0d4", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:40742", "pull_request:42823", "pull_request:42993", "pull_request:43445", "pull_request:43578", "pull_request:43740", "pull_request:43821", "pull_request:43902", "pull_request:43916", "pull_request:43919", "pull_request:43942", "pull_request:43946", "pull_request:44040", "pull_request:44235", "pull_request:45061", "pull_request:45164", "pull_request:45212"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 43916 is the strongest standalone representative of a substantive core code change, with the most reviews and the widest code impact among the set.", "canonical_issue_reason": null, "canonical_pr_reason": "No single canonical PR stands out as a 
duplicate representative; the items are too diverse. If forced to pick one central code fix, PR 43916 is the broadest and most discussion-heavy core-library change, but it is not a duplicate of the others.", "confidence": 0.91, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43578", "reason": "Different targets and purposes: Youtu test repo alignment vs Fuyu tokenizer registration. No shared underlying bug or change.", "right": "pull_request:44235"}, {"accept": false, "left": "pull_request:42823", "reason": "LASR tokenizer mapping fix and Bark test refactor are unrelated model/test changes.", "right": "pull_request:43942"}, {"accept": false, "left": "pull_request:43740", "reason": "One is PyTorch-version code cleanup in model code; the other is documentation stack/version updates. Not the same fix.", "right": "pull_request:43902"}, {"accept": false, "left": "pull_request:40742", "reason": "Assuming torch in integrations and changing Docker install commands are different maintenance tasks, not one concrete bug.", "right": "pull_request:43946"}, {"accept": false, "left": "pull_request:42823", "reason": "LASR tokenizer selection and MoE router fixes touch different model families and different code paths.", "right": "pull_request:43445"}, {"accept": false, "left": "pull_request:42993", "reason": "Both mention XPU support, but one is benchmark_v2 platform support and the other is a MusicFlamingo test fixture update; too broad to be the same change.", "right": "pull_request:45212"}, {"accept": false, "left": "pull_request:43919", "reason": "Trainer gradient synchronization logic and Gemma3n audio feature fixes are unrelated subsystems.", "right": "pull_request:44040"}, {"accept": false, "left": "pull_request:43821", "reason": "PEFT quantization typo fix and generation cache TypeError fix are different code paths and failure modes.", "right": "pull_request:45164"}, {"accept": false, "left": "pull_request:43916", "reason": "Input_embeds naming cleanup 
across generation/model docs is unrelated to flash-attn compatibility/deprecation helpers.", "right": "pull_request:45061"}], "summary": "This cluster is heterogeneous: it mixes unrelated PRs across model fixes, generation/training logic, docs, docker, benchmark support, and deprecation/API cleanup. None of the proposed soft edges look like the same underlying bug or change, so all should be rejected."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43578|pull_request:44235", "pull_request:42823|pull_request:43942", "pull_request:43740|pull_request:43902", "pull_request:40742|pull_request:43946", "pull_request:42823|pull_request:43445", "pull_request:42993|pull_request:45212", "pull_request:43919|pull_request:44040", "pull_request:43821|pull_request:45164", "pull_request:43916|pull_request:45061"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 21112, "estimated_input_tokens": 10428, "item_count": 17, "node_count": 17, "serialized_chars": 41712, "soft_pair_count": 11}, "cached_at": "2026-04-14T13:30:47Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "56558dd57531008837c2a65bfb710923205c1a38bdae5c58d5fb3ad74266ee0a", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:41549", "pull_request:43445", "pull_request:43486", "pull_request:43683", "pull_request:43781", "pull_request:43916", "pull_request:43953", "pull_request:43956", "pull_request:44037", "pull_request:44125", "pull_request:44293", "pull_request:44330", "pull_request:44414", "pull_request:44433", "pull_request:44502", "pull_request:44544", "pull_request:45155"], "result": {"analyst_result": 
{"best_issue_reason": null, "best_pr_reason": "44037 is the strongest anchor PR: it is merged, substantive, and touches core loading plus conversion mapping, unlike the more isolated utility or model-specific patches.", "canonical_issue_reason": null, "canonical_pr_reason": "If a representative PR is needed, 44037 is the most central merged code-path fix in the loading/conversion area; the rest are narrower or unrelated fixes and do not form a duplicate set.", "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44414", "reason": "Different bugs: one is tqdm verbosity in model loading, the other is PEFT adapter loading with tensor parallelism.", "right": "pull_request:45155"}, {"accept": false, "left": "pull_request:41549", "reason": "Different subsystems and fixes: DETR refactor vs removing cache_position in many generation models.", "right": "pull_request:44330"}, {"accept": false, "left": "pull_request:44293", "reason": "Unrelated utility cleanup: export/terminal styling changes vs loading-progress verbosity.", "right": "pull_request:44414"}, {"accept": false, "left": "pull_request:43445", "reason": "Both touch conversion_mapping, but they fix different model-specific MoE mappings and are not the same underlying bug.", "right": "pull_request:43956"}, {"accept": false, "left": "pull_request:44125", "reason": "Video processor loading error handling and multimodal token-type validation are unrelated issues.", "right": "pull_request:44433"}, {"accept": false, "left": "pull_request:43781", "reason": "PEFT local-adapter path handling is unrelated to qwen3_vl_moe checkpoint conversion mapping.", "right": "pull_request:43953"}, {"accept": false, "left": "pull_request:43445", "reason": "Same file only; the PRs address different MoE model mappings with different code paths and fixes.", "right": "pull_request:43953"}, {"accept": false, "left": "pull_request:43916", "reason": "Different concerns: generation input_embeds naming vs ANSI styling 
in loading reports.", "right": "pull_request:44544"}, {"accept": false, "left": "pull_request:43486", "reason": "One fixes batched video handling, the other harmonizes generation argument naming; no shared bug.", "right": "pull_request:43916"}, {"accept": false, "left": "pull_request:44037", "reason": "qwen3_vl_moe weight mapping and a torch MLU type-check fix are unrelated.", "right": "pull_request:44502"}, {"accept": false, "left": "pull_request:43683", "reason": "Forced fp32 loading behavior and qwen3_vl_moe transpose/check-dim mapping are different code-path problems.", "right": "pull_request:44037"}], "summary": "The cluster is mostly a grab bag of unrelated Transformers maintenance PRs across model loading, conversion mappings, video utils, PEFT, generation docs, and export/logging cleanup. None of the soft-edge pairs looks like the same underlying bug or a change that could realistically be merged into one PR."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44414|pull_request:45155", "pull_request:41549|pull_request:44330", "pull_request:44293|pull_request:44414", "pull_request:43445|pull_request:43956", "pull_request:44125|pull_request:44433", "pull_request:43781|pull_request:43953", "pull_request:43445|pull_request:43953", "pull_request:43916|pull_request:44544", "pull_request:43486|pull_request:43916", "pull_request:44037|pull_request:44502", "pull_request:43683|pull_request:44037"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 20350, "estimated_input_tokens": 10047, "item_count": 17, "node_count": 17, "serialized_chars": 40185, "soft_pair_count": 14}, "cached_at": "2026-04-14T13:31:14Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", 
"model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "db4039ff9dd393908ed1b4f632e92bcfe6159f2c20df325ec16b050d27e4fcee", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:42186", "pull_request:42823", "pull_request:43400", "pull_request:43578", "pull_request:43920", "pull_request:44040", "pull_request:44126", "pull_request:44235", "pull_request:44330", "pull_request:44502", "pull_request:44544", "pull_request:44833", "pull_request:44834", "pull_request:45061", "pull_request:45155", "pull_request:45284", "pull_request:45336"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "pull_request:44330 is the strongest representative artifact: it is substantive, touches the shared generation/model path, and is closer in theme to the few cache-related PRs than the other largely unrelated changes.", "canonical_issue_reason": null, "canonical_pr_reason": "pull_request:44330 is the broadest concrete code-path change in the set, centered on a real framework behavior adjustment (`cache_position` removal) across multiple models, so it is the most representative PR if one must be picked.", "confidence": 0.93, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43578", "reason": "Different changes: Youtu test repo alignment vs modular example converter cleanup. 
No shared bug or mergeable single fix.", "right": "pull_request:44834"}, {"accept": false, "left": "pull_request:44235", "reason": "Tokenizer registry tweak for Fuyu is unrelated to modular example regeneration.", "right": "pull_request:44834"}, {"accept": false, "left": "pull_request:44126", "reason": "Both touch generation/cache-related code, but one simplifies `prepare_inputs_for_generation` while the other removes `cache_position` from model implementations; not the same concrete fix.", "right": "pull_request:44330"}, {"accept": false, "left": "pull_request:44330", "reason": "`cache_position` model refactor and a Torch MLU type-check guard are unrelated.", "right": "pull_request:44502"}, {"accept": false, "left": "pull_request:44126", "reason": "Generation input prep refactor vs MLU availability/type-check fix; no shared underlying defect.", "right": "pull_request:44502"}, {"accept": false, "left": "pull_request:43920", "reason": "Updating the test fetcher for `cache_utils` is test infrastructure work, not the same change as removing `cache_position` from models.", "right": "pull_request:44330"}, {"accept": false, "left": "pull_request:43920", "reason": "Test-discovery config change and generation helper simplification are different concerns.", "right": "pull_request:44126"}, {"accept": false, "left": "pull_request:42186", "reason": "Attention mask slicing fix and generation input-prep simplification are not the same bug, despite both being framework internals.", "right": "pull_request:44126"}, {"accept": false, "left": "pull_request:42823", "reason": "LASR tokenizer test/registry update is unrelated to PEFT adapter loading with tensor parallelism.", "right": "pull_request:45155"}, {"accept": false, "left": "pull_request:45061", "reason": "Flash-Attn version/deprecation helper change is unrelated to adapter loading logic.", "right": "pull_request:45155"}, {"accept": false, "left": "pull_request:44040", "reason": "Gemma3n audio feature fix and modular example 
converter rerun are distinct changes with different code paths.", "right": "pull_request:44833"}, {"accept": false, "left": "pull_request:44502", "reason": "Type-check guard for MLU and flash-attn compatibility/deprecation work are unrelated.", "right": "pull_request:45061"}, {"accept": false, "left": "pull_request:44544", "reason": "Loading-report ANSI styling fix and Gemma4 shared-weight loading behavior are separate issues.", "right": "pull_request:45336"}, {"accept": false, "left": "pull_request:43400", "reason": "New VibeVoice tokenizer support and Qwen2 AMD CI expectation updates do not target the same problem.", "right": "pull_request:45284"}], "summary": "These pull requests are mostly unrelated one-off changes that only overlap by broad subsystem terms like cache, generation, or model internals. None of the soft edges looks like the same underlying bug/change, so all are rejected."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43578|pull_request:44834", "pull_request:44235|pull_request:44834", "pull_request:44126|pull_request:44330", "pull_request:44330|pull_request:44502", "pull_request:44126|pull_request:44502", "pull_request:43920|pull_request:44330", "pull_request:43920|pull_request:44126", "pull_request:42186|pull_request:44126", "pull_request:42823|pull_request:45155", "pull_request:45061|pull_request:45155", "pull_request:44040|pull_request:44833", "pull_request:44502|pull_request:45061", "pull_request:44544|pull_request:45336", "pull_request:43400|pull_request:45284"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 22082, "estimated_input_tokens": 10913, "item_count": 18, "node_count": 18, "serialized_chars": 43650, "soft_pair_count": 10}, "cached_at": "2026-04-14T13:31:37Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", 
"hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "a449ee231194f41057d2ca009e244ce33ca567a9dd3f06f887a64cbf6ce35075", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:42823", "pull_request:43424", "pull_request:43445", "pull_request:43578", "pull_request:43588", "pull_request:43690", "pull_request:43702", "pull_request:43712", "pull_request:43913", "pull_request:43916", "pull_request:43953", "pull_request:44125", "pull_request:44293", "pull_request:44414", "pull_request:44433", "pull_request:44634", "pull_request:44866", "pull_request:45075"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #44433 is the best overall choice because it is a broad, specific bug fix rather than a narrow test/typing/docs change or a feature add, and it has the clearest user-facing correctness impact.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #44433 is the strongest standalone representative: it fixes a concrete multimodal bug with clear runtime impact, spans several related model files, and includes targeted tests.", "confidence": 0.94, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43913", "reason": "Both touch model-loading/generation plumbing, but one fixes Qwen3-VL-MoE weight mapping while the other standardizes `inputs_embeds` terminology across generation code; different underlying problems.", "right": "pull_request:43916"}, {"accept": false, "left": "pull_request:43578", "reason": "Youtu test repo alignment and strict export cleanup are unrelated changes; one updates model test fixtures, the other changes export/runtime utilities and broad test behavior.", "right": "pull_request:44293"}, {"accept": false, "left": "pull_request:43445", "reason": "One fixes MoE router mappings and conversion logic; the other only reduces tqdm verbosity during loading. 
Same broad area, but not the same bug or change.", "right": "pull_request:44414"}, {"accept": false, "left": "pull_request:44634", "reason": "Both are LFM2 cache/kernel work, but they fix different concrete issues: kernel path sequence-length handling vs cache alignment/state update semantics.", "right": "pull_request:44866"}, {"accept": false, "left": "pull_request:42823", "reason": "LASR tokenizer/integration work is unrelated to an executorch dynamic-shapes export test; these would not plausibly merge into one fix.", "right": "pull_request:43424"}, {"accept": false, "left": "pull_request:43690", "reason": "Both edit `modeling_utils.py`, but one fixes a notebook/repl crash while the other improves typing/documentation for model properties; no shared bug.", "right": "pull_request:43712"}, {"accept": false, "left": "pull_request:43588", "reason": "Qwen3 Omni feature return typing is unrelated to the multimodal token-type validation bug; different code paths and different fixes.", "right": "pull_request:44433"}, {"accept": false, "left": "pull_request:44125", "reason": "Video-processor loading errors and model-loading verbosity are unrelated; one is an API lookup bug, the other is logging noise during weight loading.", "right": "pull_request:44414"}, {"accept": false, "left": "pull_request:43702", "reason": "Moonshine streaming support and DeepSeek-OCR-2 model addition are separate feature additions with different codebases and goals.", "right": "pull_request:45075"}, {"accept": false, "left": "pull_request:43588", "reason": "Qwen3 Omni feature cleanup and Qwen3-VL-MoE conversion mapping are distinct changes; shared Qwen family naming is not enough to treat them as duplicates.", "right": "pull_request:43953"}], "summary": "The soft-similarity pairs are mostly false positives: they share a subsystem or file family, but each PR addresses a different concrete bug, feature, or test update. 
No pair is a safe duplicate merge candidate."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43913|pull_request:43916", "pull_request:43578|pull_request:44293", "pull_request:43445|pull_request:44414", "pull_request:44634|pull_request:44866", "pull_request:42823|pull_request:43424", "pull_request:43690|pull_request:43712", "pull_request:43588|pull_request:44433", "pull_request:44125|pull_request:44414", "pull_request:43702|pull_request:45075", "pull_request:43588|pull_request:43953"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 20600, "estimated_input_tokens": 10172, "item_count": 18, "node_count": 18, "serialized_chars": 40685, "soft_pair_count": 9}, "cached_at": "2026-04-14T13:32:01Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "42fddc6b88c2983a7a39f0d340544e25d68f3917a6adc2d46c92c073e3c8e508", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:41763", "pull_request:43178", "pull_request:43424", "pull_request:43486", "pull_request:43565", "pull_request:43578", "pull_request:43588", "pull_request:43683", "pull_request:43953", "pull_request:43973", "pull_request:44293", "pull_request:44812", "pull_request:45061", "pull_request:45078", "pull_request:45123", "pull_request:45170", "pull_request:45190", "pull_request:45340"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 45340 is the closest thing to a representative PR because it is a substantive code change with an inbound reference, but it still does not represent the rest of the cluster well.", "canonical_issue_reason": null, "canonical_pr_reason": "No canonical PR: the 
cluster spans unrelated PRs rather than one underlying change. Similar filenames or shared subsystems are not enough to treat them as duplicates.", "confidence": 0.98, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44293", "reason": "Both touch import_utils, but 44293 is a strict-export cleanup and 45061 adds a deprecated flash-attn compatibility helper; different underlying changes.", "right": "pull_request:45061"}, {"accept": false, "left": "pull_request:41763", "reason": "Both add model documentation/code for new audio/time-series models, but they are unrelated model additions with different architectures and files.", "right": "pull_request:43973"}, {"accept": false, "left": "pull_request:43565", "reason": "Both modify the repo consistency workflow, but one changes bot permissions/deletes a style bot workflow while the other adds editable-install setup; separate fixes.", "right": "pull_request:44812"}, {"accept": false, "left": "pull_request:43953", "reason": "These are unrelated: one fixes Qwen3VL-MoE conversion mapping, the other changes tokenizer conversion/error behavior.", "right": "pull_request:45078"}, {"accept": false, "left": "pull_request:43486", "reason": "One fixes 5D video batching and the other fixes fp32 upcasting in model loading; no shared bug or mergeable combined change.", "right": "pull_request:43683"}, {"accept": false, "left": "pull_request:43424", "reason": "These are separate test-focused PRs for different models/features (executorch dynamic shapes vs. Youtu repo alignment).", "right": "pull_request:43578"}, {"accept": false, "left": "pull_request:43178", "reason": "Pipeline AMD CI fixes and PP chart2table OCR test updates are unrelated changes, despite both being test/CI oriented.", "right": "pull_request:45123"}, {"accept": false, "left": "pull_request:45170", "reason": "One fixes a layrnorm/layernorm conversion typo across CLIP-like models; the other is a broad CLI typing cleanup. 
Different code paths.", "right": "pull_request:45190"}, {"accept": false, "left": "pull_request:43588", "reason": "Qwen3 Omni-MoE feature/output typing fix and vlm conversion-mapping changes are not the same bug or change and would not merge cleanly together.", "right": "pull_request:45340"}], "summary": "This cluster is not a true duplicate set. The soft pairs are mostly superficial similarity within the same broad subsystem (utilities, workflows, conversion mapping, tests) but they address different concrete bugs or additions, so none should be merged together."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44293|pull_request:45061", "pull_request:41763|pull_request:43973", "pull_request:43565|pull_request:44812", "pull_request:43953|pull_request:45078", "pull_request:43486|pull_request:43683", "pull_request:43424|pull_request:43578", "pull_request:43178|pull_request:45123", "pull_request:45170|pull_request:45190", "pull_request:43588|pull_request:45340"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 18222, "estimated_input_tokens": 8983, "item_count": 18, "node_count": 18, "serialized_chars": 35930, "soft_pair_count": 10}, "cached_at": "2026-04-14T13:32:21Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "888cc36dd5043ca24dd64fec494d8526264c8dbba29ba0756aedd98cd7c703a9", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43424", "pull_request:43426", "pull_request:43488", "pull_request:43532", "pull_request:43821", "pull_request:43919", "pull_request:43920", "pull_request:43982", "pull_request:44040", "pull_request:44163", "pull_request:44286", 
"pull_request:44293", "pull_request:44502", "pull_request:44544", "pull_request:44566", "pull_request:45164", "pull_request:45284", "pull_request:45340"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "If forced to pick one standalone representative, PR 45340 is the most substantive and self-contained code fix, but it does not actually represent the cluster as a whole.", "canonical_issue_reason": null, "canonical_pr_reason": "No single PR is a sensible canonical representative because the cluster spans multiple unrelated fixes across different subsystems; the pairwise similarities are superficial.", "confidence": 0.98, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43821", "reason": "Different fixes: PEFT quantization typo vs conversion mapping updates for VLMs; no shared code path.", "right": "pull_request:45340"}, {"accept": false, "left": "pull_request:44040", "reason": "Gemma3n audio feature fix vs model conversion mapping changes; unrelated changes.", "right": "pull_request:45340"}, {"accept": false, "left": "pull_request:44163", "reason": "ESM attention_mask/token_dropout bugfix vs conversion mapping refactor; different subsystems and behaviors.", "right": "pull_request:45340"}, {"accept": false, "left": "pull_request:43920", "reason": "One changes test selection for cache_utils, the other changes export cleanup behavior; they are not the same bug or fix.", "right": "pull_request:44293"}, {"accept": false, "left": "pull_request:43919", "reason": "Gradient synchronization logic vs ANSI/style formatting in loading reports; unrelated functionality.", "right": "pull_request:44544"}, {"accept": false, "left": "pull_request:44502", "reason": "Torch MLU availability guard vs GenerationMixin NoneType iteration fix; distinct issues in separate code paths.", "right": "pull_request:45164"}, {"accept": false, "left": "pull_request:43424", "reason": "Executorch dynamic-shape test addition vs Fouroversix quantization/model 
support; no common concrete bug.", "right": "pull_request:44286"}, {"accept": false, "left": "pull_request:43426", "reason": "Tokenizers cleanup behavior restoration vs typing/CLI refactor; different concerns and no mergeable single change.", "right": "pull_request:44566"}, {"accept": false, "left": "pull_request:43982", "reason": "Blame-ignore metadata file vs Qwen2 AMD test expectation adjustments; unrelated changes.", "right": "pull_request:45284"}, {"accept": false, "left": "pull_request:43488", "reason": "Both are repo/test workflow-related, but one is a setup/version/dependency tweak and the other rewrites CI/comment workflows; not the same underlying change.", "right": "pull_request:43532"}], "summary": "This cluster is highly heterogeneous: every soft pair links unrelated PRs that only share broad surface themes (tests, infra, typing, or model code) rather than the same underlying bug or change. No duplicate consolidation is warranted."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43821|pull_request:45340", "pull_request:44040|pull_request:45340", "pull_request:44163|pull_request:45340", "pull_request:43920|pull_request:44293", "pull_request:43919|pull_request:44544", "pull_request:44502|pull_request:45164", "pull_request:43424|pull_request:44286", "pull_request:43426|pull_request:44566", "pull_request:43982|pull_request:45284", "pull_request:43488|pull_request:43532"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 20676, "estimated_input_tokens": 10210, "item_count": 17, "node_count": 17, "serialized_chars": 40838, "soft_pair_count": 11}, "cached_at": "2026-04-14T13:32:47Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": 
"gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "ff9c3d1ce3b3e4da21da639ed2abbd7dd00524eb9ea5b8d41436dab1e91ab1a3", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43486", "pull_request:43740", "pull_request:43767", "pull_request:43793", "pull_request:43795", "pull_request:43823", "pull_request:43919", "pull_request:43930", "pull_request:44037", "pull_request:44544", "pull_request:44566", "pull_request:44601", "pull_request:44724", "pull_request:45164", "pull_request:45224", "pull_request:45289", "pull_request:45340"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "No PR cleanly represents the whole cluster because the items are unrelated; any representative would be arbitrary.", "canonical_issue_reason": null, "canonical_pr_reason": "No single PR is a credible canonical duplicate target; the linked PRs solve different concrete problems across unrelated code paths.", "confidence": 0.98, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44724", "reason": "Both touch auto mappings, but 44724 adds/fixes missing entries while 45224 removes unnecessary ones; different changes, not one underlying bug.", "right": "pull_request:45224"}, {"accept": false, "left": "pull_request:43767", "reason": "43767 is PP-Chart2Table model support; 44601 is native pipeline-parallel loading support. Different subsystems and goals.", "right": "pull_request:44601"}, {"accept": false, "left": "pull_request:43793", "reason": "Model support for PP-OCRv5_mobile_rec is unrelated to adding pipeline-parallel support in core loading.", "right": "pull_request:44601"}, {"accept": false, "left": "pull_request:43795", "reason": "PP-OCRv5_server_rec model support and native PP loading are distinct changes with no shared concrete bug.", "right": "pull_request:44601"}, {"accept": false, "left": "pull_request:44544", "reason": "One fixes ANSI styling in loading reports; the other fixes a NoneType iteration error in generation. 
Different code paths.", "right": "pull_request:45164"}, {"accept": false, "left": "pull_request:43486", "reason": "Video batching for 5D arrays and gradient synchronization in training are unrelated fixes.", "right": "pull_request:43919"}, {"accept": false, "left": "pull_request:43740", "reason": "Removing PyTorch <2.4 code paths is unrelated to improving autocast detection/usage.", "right": "pull_request:43930"}, {"accept": false, "left": "pull_request:43823", "reason": "MobileLLM model addition does not match fixing conversion mappings for VLMs.", "right": "pull_request:45340"}, {"accept": false, "left": "pull_request:45289", "reason": "RoPE warning validation tweaks are unrelated to conversion-mapping changes for VLMs.", "right": "pull_request:45340"}, {"accept": false, "left": "pull_request:44037", "reason": "A qwen3_vl_moe weight-mapping fix and a typing/CLI extension are different kinds of changes with no shared bug.", "right": "pull_request:44566"}, {"accept": false, "left": "pull_request:43486", "reason": "Video utility batching and generation cache handling are separate bugs in different modules.", "right": "pull_request:45164"}], "summary": "This cluster is heterogeneous: model support additions, auto-mapping cleanup, docs/typing changes, and unrelated utility/training fixes. 
The soft links are all false positives rather than true duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44724|pull_request:45224", "pull_request:43767|pull_request:44601", "pull_request:43793|pull_request:44601", "pull_request:43795|pull_request:44601", "pull_request:44544|pull_request:45164", "pull_request:43486|pull_request:43919", "pull_request:43740|pull_request:43930", "pull_request:43823|pull_request:45340", "pull_request:45289|pull_request:45340", "pull_request:44037|pull_request:44566", "pull_request:43486|pull_request:45164"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 19710, "estimated_input_tokens": 9727, "item_count": 18, "node_count": 18, "serialized_chars": 38907, "soft_pair_count": 11}, "cached_at": "2026-04-14T13:33:10Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "5b0b41bc9a48785c4b601ffa00f24228a6f25bc19d030642af1029611a9937fc", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43424", "pull_request:43445", "pull_request:43464", "pull_request:43578", "pull_request:43592", "pull_request:43821", "pull_request:43942", "pull_request:43956", "pull_request:43989", "pull_request:44040", "pull_request:44250", "pull_request:44286", "pull_request:44330", "pull_request:44502", "pull_request:44827", "pull_request:44834", "pull_request:45033", "pull_request:45284"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 44330 is the strongest representative code fix in the set: it addresses a concrete `cache_position`/causal-mask path across multiple models and includes coordinated code changes, making it more 
substantive than the test-only or typo-only PRs.", "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43445", "reason": "MoE router checkpoint mapping fix vs. MarkupLM integration test dtype tweak; unrelated subsystems and bugs.", "right": "pull_request:43464"}, {"accept": false, "left": "pull_request:43942", "reason": "Bark test coverage change vs. training_args `report_to=\"all\"` regression; different functionality and code paths.", "right": "pull_request:44250"}, {"accept": false, "left": "pull_request:44502", "reason": "Torch bf16 availability type-checker guard vs. AMD Qwen2 expectation updates; unrelated fixes, one utility-level and one test expectation change.", "right": "pull_request:45284"}, {"accept": false, "left": "pull_request:44330", "reason": "Model runtime `cache_position` handling across many architectures vs. modular example cleanup; not the same underlying bug or mergeable change.", "right": "pull_request:44834"}, {"accept": false, "left": "pull_request:43578", "reason": "Youtu test repo alignment vs. Bark test enablement; both are test-only changes but for different models and failure modes.", "right": "pull_request:43942"}, {"accept": false, "left": "pull_request:43424", "reason": "Executorch dynamic-shape test addition vs. `report_to` handling regression fix; no shared code path.", "right": "pull_request:44250"}, {"accept": false, "left": "pull_request:44286", "reason": "Fouroversix quantization support vs. image-processing kwargs BC alias; entirely separate integrations and behaviors.", "right": "pull_request:45033"}, {"accept": false, "left": "pull_request:43956", "reason": "Qwen3-VL-MoE weight mapping fix vs. Mistral4 test fix; different model families and different problem types.", "right": "pull_request:44827"}, {"accept": false, "left": "pull_request:43989", "reason": "AutoVideoProcessor lookup guard vs. 
Gemma3n audio-feature fix; both are model-adjacent but not the same concrete bug.", "right": "pull_request:44040"}, {"accept": false, "left": "pull_request:43592", "reason": "Config default propagation cleanup across many models vs. PEFT quantization typo fix; unrelated changes.", "right": "pull_request:43821"}, {"accept": false, "left": "pull_request:43592", "reason": "Broad configuration-default refactor vs. Gemma3n `get_audio_features` bugfix; not the same underlying change, despite one shared test file reference.", "right": "pull_request:44040"}], "summary": "This cluster is heterogeneous: it mixes unrelated model test fixes, configuration cleanups, utility bugfixes, and MoE/auto-loader changes. The soft edges are similarity noise from working in the same Transformers codebase, not evidence of the same underlying bug or change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43445|pull_request:43464", "pull_request:43942|pull_request:44250", "pull_request:44502|pull_request:45284", "pull_request:44330|pull_request:44834", "pull_request:43578|pull_request:43942", "pull_request:43424|pull_request:44250", "pull_request:44286|pull_request:45033", "pull_request:43956|pull_request:44827", "pull_request:43989|pull_request:44040", "pull_request:43592|pull_request:43821", "pull_request:43592|pull_request:44040"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 19574, "estimated_input_tokens": 9659, "item_count": 17, "node_count": 17, "serialized_chars": 38636, "soft_pair_count": 10}, "cached_at": "2026-04-14T13:33:36Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": 
"c3024b8e386d71ffd8c3763121dc87d52b9e426878f3673424eab4c6ece79fb0", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:41145", "pull_request:42668", "pull_request:43251", "pull_request:43592", "pull_request:43838", "pull_request:43913", "pull_request:43938", "pull_request:44163", "pull_request:44250", "pull_request:44739", "pull_request:44834", "pull_request:45033", "pull_request:45078", "pull_request:45190", "pull_request:45283", "pull_request:45289", "pull_request:45340"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "43838 is the most representative standalone PR in terms of scope and integration breadth, but it should not be treated as equivalent to the other PRs in this set.", "canonical_issue_reason": null, "canonical_pr_reason": "No PR is a true duplicate center for the cluster; 43838 is the broadest coherent feature PR with an explicit issue target, so it is the closest anchor among otherwise unrelated changes.", "confidence": 0.98, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43592", "reason": "Different bugs: 43592 fixes default config values across many models, while 44163 fixes ESM attention_mask propagation into embeddings/token_dropout.", "right": "pull_request:44163"}, {"accept": false, "left": "pull_request:43592", "reason": "Unrelated areas: config default cleanup vs tokenizer auto-selection/error handling.", "right": "pull_request:45078"}, {"accept": false, "left": "pull_request:43592", "reason": "Different changes: config default values vs RoPE factor validation warnings.", "right": "pull_request:45289"}, {"accept": false, "left": "pull_request:43938", "reason": "Both are test-fix style PRs, but for different models and failures; they do not fix the same underlying code path.", "right": "pull_request:44739"}, {"accept": false, "left": "pull_request:45033", "reason": "Backwards-compatibility alias in image processing is unrelated to CLI typing/casting fixes.", "right": 
"pull_request:45190"}, {"accept": false, "left": "pull_request:43251", "reason": "Cross-entropy kwargs forwarding is unrelated to qwen3_vl_moe weight mapping and transpose/sentinel logic.", "right": "pull_request:43913"}, {"accept": false, "left": "pull_request:42668", "reason": "Different scopes: processor loading robustness vs modular example cleanups; no shared concrete bug or mergeable change.", "right": "pull_request:44834"}, {"accept": false, "left": "pull_request:44250", "reason": "Report_to handling in training args is unrelated to conversion-mapping fixes for VLMs.", "right": "pull_request:45340"}, {"accept": false, "left": "pull_request:41145", "reason": "Both concern Qwen3-family support, but one adds rope kernel support for Qwen3 and the other adds Qwen3-ASR model/processor support; distinct feature work, not the same change.", "right": "pull_request:43838"}, {"accept": false, "left": "pull_request:43838", "reason": "Qwen3-ASR model support and Qwen3.5 GGUF loading support are separate features touching different code paths and targets.", "right": "pull_request:45283"}], "summary": "This cluster is mostly a collection of unrelated Transformers PRs: model support additions, config/default tweaks, auto-mapping changes, and a few isolated bug/test fixes. 
The soft pairs share only broad topical similarity (same library or model family) and not the same concrete code-path change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43592|pull_request:44163", "pull_request:43592|pull_request:45078", "pull_request:43592|pull_request:45289", "pull_request:43938|pull_request:44739", "pull_request:45033|pull_request:45190", "pull_request:43251|pull_request:43913", "pull_request:42668|pull_request:44834", "pull_request:44250|pull_request:45340", "pull_request:41145|pull_request:43838", "pull_request:43838|pull_request:45283"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 20316, "estimated_input_tokens": 10030, "item_count": 18, "node_count": 18, "serialized_chars": 40119, "soft_pair_count": 10}, "cached_at": "2026-04-14T13:33:58Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "c67c8d4e5e57a3effc190bb89aebbd43f2b2c0364e9b235fb234aa51a1a3cd11", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:42668", "pull_request:43424", "pull_request:43445", "pull_request:43690", "pull_request:43823", "pull_request:43948", "pull_request:44082", "pull_request:44125", "pull_request:44296", "pull_request:44330", "pull_request:44433", "pull_request:44502", "pull_request:44544", "pull_request:44883", "pull_request:45033", "pull_request:45221", "pull_request:45284", "pull_request:45340"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44433", "reason": "Different 
subsystems and bugs: multimodal RoPE missing `mm_token_type_ids` vs a type-checker guard in `import_utils`.", "right": "pull_request:44502"}, {"accept": false, "left": "pull_request:42668", "reason": "AudioFlamingo3 processor auto-loading fixes are unrelated to adding a new MobileLLM model.", "right": "pull_request:43823"}, {"accept": false, "left": "pull_request:44125", "reason": "One changes video-processor class lookup; the other adds an audio-from-video-file error path. Different code paths and user-facing problems.", "right": "pull_request:45221"}, {"accept": false, "left": "pull_request:44296", "reason": "Auto-docstring generation for configs is unrelated to AMD CI expectation updates in Qwen2 tests.", "right": "pull_request:45284"}, {"accept": false, "left": "pull_request:44883", "reason": "State-dict dtype inference and conversion-mapping updates for VLMs address unrelated functionality.", "right": "pull_request:45340"}, {"accept": false, "left": "pull_request:43690", "reason": "Custom-model notebook/repl crash fix has no meaningful overlap with checkpoint conversion mappings.", "right": "pull_request:45340"}, {"accept": false, "left": "pull_request:43445", "reason": "MoE router weight renaming and image-processing kwargs backward compatibility are different changes.", "right": "pull_request:45033"}, {"accept": false, "left": "pull_request:43948", "reason": "Image token counting and removal of `cache_position` in models are separate model-behavior changes, not the same bug.", "right": "pull_request:44330"}, {"accept": false, "left": "pull_request:43424", "reason": "Executorch dynamic-shape export tests are unrelated to the `_further_process_kwargs` compatibility alias.", "right": "pull_request:45033"}, {"accept": false, "left": "pull_request:44082", "reason": "PatchTSMixer `post_init` handling and loading-report ANSI styling are unrelated utility/model fixes.", "right": "pull_request:44544"}], "summary": "No true duplicate cluster is present here: the 
candidate pairs span unrelated fixes across multimodal modeling, auto-loading, import utilities, docs, tests, and conversion mappings. None look like the same underlying bug or change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44433|pull_request:44502", "pull_request:42668|pull_request:43823", "pull_request:44125|pull_request:45221", "pull_request:44296|pull_request:45284", "pull_request:44883|pull_request:45340", "pull_request:43690|pull_request:45340", "pull_request:43445|pull_request:45033", "pull_request:43948|pull_request:44330", "pull_request:43424|pull_request:45033", "pull_request:44082|pull_request:44544"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 19078, "estimated_input_tokens": 9411, "item_count": 17, "node_count": 17, "serialized_chars": 37643, "soft_pair_count": 12}, "cached_at": "2026-04-14T13:34:26Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "d58e11d04ec647fc0a2e9f303f4ba5fd2af43006fe40c7574f9edad70214eb86", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:42028", "pull_request:43251", "pull_request:43486", "pull_request:43588", "pull_request:43702", "pull_request:43821", "pull_request:43823", "pull_request:43982", "pull_request:44040", "pull_request:44082", "pull_request:44163", "pull_request:44236", "pull_request:44414", "pull_request:44544", "pull_request:45078", "pull_request:45155", "pull_request:45289"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "No PR is a strong global representative for duplicate triage because the set is heterogeneous; the largest/most central-looking changes are still 
unrelated to the others.", "canonical_issue_reason": null, "canonical_pr_reason": "No single PR is a valid canonical duplicate here; the items span unrelated features and fixes across different code paths.", "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43251", "reason": "Different fixes: cross_entropy kwargs forwarding vs a PEFT quantization typo. Same area of code is not enough.", "right": "pull_request:43821"}, {"accept": false, "left": "pull_request:43982", "reason": "Unrelated changes: adding a blame-ignore file vs zero3 init/dependency handling.", "right": "pull_request:44236"}, {"accept": false, "left": "pull_request:43982", "reason": "A repo housekeeping file and a PEFT adapter loading change are not the same bug/change.", "right": "pull_request:45155"}, {"accept": false, "left": "pull_request:43486", "reason": "Video batching bugfix vs PatchTSMixer post_init/config cleanup; different code paths and symptoms.", "right": "pull_request:44082"}, {"accept": false, "left": "pull_request:44544", "reason": "Loading-report ANSI styling is unrelated to tokenizer conversion/error logic.", "right": "pull_request:45078"}, {"accept": false, "left": "pull_request:43823", "reason": "Model addition for MobileLLM is unrelated to reducing tqdm verbosity during loading.", "right": "pull_request:44414"}, {"accept": false, "left": "pull_request:43588", "reason": "Qwen3 omni feature-type fix and model-loading verbosity are different changes.", "right": "pull_request:44414"}, {"accept": false, "left": "pull_request:43821", "reason": "PEFT quantization typo fix is not the same underlying issue as model-loading tqdm cleanup.", "right": "pull_request:44414"}, {"accept": false, "left": "pull_request:44040", "reason": "Gemma3n audio feature fix and loading verbosity reduction are distinct changes.", "right": "pull_request:44414"}, {"accept": false, "left": "pull_request:44163", "reason": "ESM2 token_dropout/attention_mask fix is unrelated to 
model-loading progress output.", "right": "pull_request:44414"}, {"accept": false, "left": "pull_request:44414", "reason": "One changes tqdm verbosity during loading, the other suppresses RoPE validation warnings; different behaviors and code paths.", "right": "pull_request:45289"}, {"accept": false, "left": "pull_request:42028", "reason": "MiniMax-M2 support and Moonshine streaming are separate model additions, not duplicate fixes.", "right": "pull_request:43702"}], "summary": "The candidate pairs are only superficially similar (shared subsystems or utility code) and do not appear to fix the same underlying bug or change. All soft edges should be rejected."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43251|pull_request:43821", "pull_request:43982|pull_request:44236", "pull_request:43982|pull_request:45155", "pull_request:43486|pull_request:44082", "pull_request:44544|pull_request:45078", "pull_request:43823|pull_request:44414", "pull_request:43588|pull_request:44414", "pull_request:43821|pull_request:44414", "pull_request:44040|pull_request:44414", "pull_request:44163|pull_request:44414", "pull_request:44414|pull_request:45289", "pull_request:42028|pull_request:43702"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 19712, "estimated_input_tokens": 9728, "item_count": 18, "node_count": 18, "serialized_chars": 38911, "soft_pair_count": 11}, "cached_at": "2026-04-14T13:35:08Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "b08cf5b58ef434f2e8de96b72611e73f44d568fd714e73b8f693c20f904db549", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43251", 
"pull_request:43426", "pull_request:43711", "pull_request:43823", "pull_request:43913", "pull_request:43973", "pull_request:43982", "pull_request:44126", "pull_request:44163", "pull_request:44250", "pull_request:44293", "pull_request:44386", "pull_request:44801", "pull_request:44827", "pull_request:45044", "pull_request:45170", "pull_request:45221", "pull_request:45283"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 44293 is the strongest standalone representative of a substantial core/runtime fix, but it still does not match the rest of the cluster.", "canonical_issue_reason": null, "canonical_pr_reason": "No canonical PR stands out; the items do not form a single duplicate-change group.", "confidence": 0.98, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44386", "reason": "Both touch audio-related code, but one fixes generation sampling for Higgs Audio v2 while the other adds a user-friendly error for loading video files as audio. Different bugs, different code paths.", "right": "pull_request:45221"}, {"accept": false, "left": "pull_request:43823", "reason": "MobileLLM model addition versus qwen3_vl_moe weight-mapping/sentinel conversion fix. Same broad model code area, but not the same change or bug.", "right": "pull_request:43913"}, {"accept": false, "left": "pull_request:44126", "reason": "One simplifies generation input prep; the other fixes a layernorm naming typo across CLIP-like models. No shared underlying bug.", "right": "pull_request:45170"}, {"accept": false, "left": "pull_request:44250", "reason": "`report_to=\"all\"` regression fix versus tokenizer-class mapping updates for DeepSeek/ModernBERT. Completely unrelated.", "right": "pull_request:44801"}, {"accept": false, "left": "pull_request:43973", "reason": "Lfm2 audio model integration versus Qwen3.5 GGUF loading support. 
Both model-adjacent, but distinct additions with different targets.", "right": "pull_request:45283"}, {"accept": false, "left": "pull_request:43913", "reason": "qwen3_vl_moe conversion/sentinel fix versus strict export cleanup in import/output-capturing utilities. Different subsystems and problems.", "right": "pull_request:44293"}, {"accept": false, "left": "pull_request:44163", "reason": "ESM attention_mask/token_dropout fix versus tokenizer backend class mapping updates. Not the same code path or bug.", "right": "pull_request:44801"}, {"accept": false, "left": "pull_request:43711", "reason": "Scheduler initialization order fix versus `report_to='all'` regression. Both trainer-related, but unrelated issues.", "right": "pull_request:44250"}, {"accept": false, "left": "pull_request:43426", "reason": "Tokenizers backend cleanup restoration versus adding Mistral4 tests/fixes. No concrete overlap.", "right": "pull_request:44827"}, {"accept": false, "left": "pull_request:43426", "reason": "Tokenization cleanup behavior versus creation of `.git-blame-ignore-revs`. Entirely unrelated.", "right": "pull_request:43982"}, {"accept": false, "left": "pull_request:43251", "reason": "Cross-entropy kwargs passthrough versus Janus image-generation bug fix. Different models and different failure modes.", "right": "pull_request:45044"}], "summary": "This cluster is heterogeneous: it mixes unrelated bug fixes, feature additions, conversion/mapping changes, and docs/tests across different subsystems. 
None of the soft pairs look like the same underlying change, so I reject all of them."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44386|pull_request:45221", "pull_request:43823|pull_request:43913", "pull_request:44126|pull_request:45170", "pull_request:44250|pull_request:44801", "pull_request:43973|pull_request:45283", "pull_request:43913|pull_request:44293", "pull_request:44163|pull_request:44801", "pull_request:43711|pull_request:44250", "pull_request:43426|pull_request:44827", "pull_request:43426|pull_request:43982", "pull_request:43251|pull_request:45044"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 18532, "estimated_input_tokens": 9138, "item_count": 18, "node_count": 18, "serialized_chars": 36551, "soft_pair_count": 12}, "cached_at": "2026-04-14T13:35:29Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "c263db115d609eb74b4b5f7fe7fb55fbcee2dabf9962a8e0db4d07c07bcae2fd", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:42823", "pull_request:43426", "pull_request:43532", "pull_request:43563", "pull_request:43588", "pull_request:43690", "pull_request:43953", "pull_request:43982", "pull_request:43995", "pull_request:44082", "pull_request:44163", "pull_request:44414", "pull_request:44566", "pull_request:44801", "pull_request:44827", "pull_request:44883", "pull_request:45164", "pull_request:45289"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "pull_request:45164 is the strongest standalone PR here: it addresses a specific TypeError in GenerationMixin.generate with a tight, well-scoped fix.", "canonical_issue_reason": null, 
"canonical_pr_reason": "No issue in the cluster. For a representative PR, pull_request:45164 is the cleanest self-contained runtime bug fix with a clear title and minimal diff.", "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44566", "reason": "Different changes: CLI typing expansion versus tokenizer-class mapping for DeepSeek/ModernBERT. No shared code path or bug.", "right": "pull_request:44801"}, {"accept": false, "left": "pull_request:43532", "reason": "Both are unrelated prank/style PRs touching tests, not the same underlying fix or change.", "right": "pull_request:43563"}, {"accept": false, "left": "pull_request:43953", "reason": "Conversion-mapping update for Qwen3VL-MoE versus PatchTSMixer post_init cleanup are unrelated.", "right": "pull_request:44082"}, {"accept": false, "left": "pull_request:43426", "reason": "Tokenizers cleanup behavior and dtype guessing in model loading are distinct code paths and bugs.", "right": "pull_request:44883"}, {"accept": false, "left": "pull_request:43426", "reason": "One restores tokenizers cleanup_spaces behavior; the other fixes ESM token_dropout/attention_mask handling. 
Not the same issue.", "right": "pull_request:44163"}, {"accept": false, "left": "pull_request:43426", "reason": "Tokenization cleanup versus RoPE parameter validation are different subsystems and fixes.", "right": "pull_request:45289"}, {"accept": false, "left": "pull_request:43426", "reason": "Tokenizers decode cleanup and Falcon output-collection refactor are unrelated.", "right": "pull_request:43995"}, {"accept": false, "left": "pull_request:42823", "reason": "LASR tokenizer/test integration work and adding a git-blame-ignore-revs file are unrelated changes.", "right": "pull_request:43982"}, {"accept": false, "left": "pull_request:43690", "reason": "Custom-model notebook/repl crash fix versus Qwen3VL-MoE conversion mapping are unrelated.", "right": "pull_request:43953"}, {"accept": false, "left": "pull_request:44801", "reason": "Tokenizer-class hub fix and RoPE warning suppression address different bugs in different areas.", "right": "pull_request:45289"}, {"accept": false, "left": "pull_request:44414", "reason": "Model-loading tqdm verbosity reduction and Mistral4 test fixes are not the same code-path problem.", "right": "pull_request:44827"}, {"accept": false, "left": "pull_request:43588", "reason": "Qwen3 Omni feature extraction fix and GenerationMixin NoneType iterable fix are unrelated runtime issues.", "right": "pull_request:45164"}], "summary": "This cluster is a loose semantic grouping of unrelated PRs. 
None of the soft-edge pairs appear to fix the same concrete bug or implement the same change, so all candidate duplicate links should be rejected."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44566|pull_request:44801", "pull_request:43532|pull_request:43563", "pull_request:43953|pull_request:44082", "pull_request:43426|pull_request:44883", "pull_request:43426|pull_request:44163", "pull_request:43426|pull_request:45289", "pull_request:43426|pull_request:43995", "pull_request:42823|pull_request:43982", "pull_request:43690|pull_request:43953", "pull_request:44801|pull_request:45289", "pull_request:44414|pull_request:44827", "pull_request:43588|pull_request:45164"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 18790, "estimated_input_tokens": 9267, "item_count": 18, "node_count": 18, "serialized_chars": 37067, "soft_pair_count": 10}, "cached_at": "2026-04-14T13:35:55Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "f6e6c9ce42968f8e60a214978373c53733318a021fcf8c893f81357a9450c93c", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43500", "pull_request:43558", "pull_request:43592", "pull_request:43821", "pull_request:43889", "pull_request:43946", "pull_request:43995", "pull_request:44053", "pull_request:44126", "pull_request:44414", "pull_request:44417", "pull_request:44620", "pull_request:44634", "pull_request:44724", "pull_request:44827", "pull_request:44883", "pull_request:45284", "pull_request:45340"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "45340 is the most substantial merged PR in the set and touches shared conversion 
infrastructure, so it is the least-bad representative; however, the cluster is not genuinely duplicate-related.", "canonical_issue_reason": null, "canonical_pr_reason": "There is no true canonical PR for this cluster because the items are heterogeneous. If forced to pick the broadest merged infrastructure change, 45340 is the closest representative, but it is not a duplicate target for the rest.", "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44620", "reason": "Different subsystems and goals: serve API typing/422 fix vs AMD CI expectation updates for Qwen2 tests.", "right": "pull_request:45284"}, {"accept": false, "left": "pull_request:43995", "reason": "Falcon output-interface refactor and model-loading tqdm verbosity are unrelated code paths.", "right": "pull_request:44414"}, {"accept": false, "left": "pull_request:44827", "reason": "Mistral4 test/model work and VLM conversion mapping fixes are distinct changes with no shared bug.", "right": "pull_request:45340"}, {"accept": false, "left": "pull_request:44126", "reason": "Generation input simplification and Neuron kernels integration are different features in different files.", "right": "pull_request:44417"}, {"accept": false, "left": "pull_request:43821", "reason": "Both touch peft.py, but they correct different attribute names; this looks like separate typo fixes, not one duplicate change.", "right": "pull_request:44053"}, {"accept": false, "left": "pull_request:43889", "reason": "Jamba slow-path fallback and LFM2 kernel-path fixes address different model implementations and bugs.", "right": "pull_request:44634"}, {"accept": false, "left": "pull_request:44620", "reason": "Serve 422 handling and Mistral4 test fixes are unrelated.", "right": "pull_request:44827"}, {"accept": false, "left": "pull_request:43500", "reason": "These are separate no-op test-file edits with different injected lines; not the same underlying bug or change.", "right": "pull_request:43558"}, {"accept": 
false, "left": "pull_request:43946", "reason": "Dockerfile dependency updates and auto-file registry entries are unrelated maintenance changes.", "right": "pull_request:44724"}, {"accept": false, "left": "pull_request:43592", "reason": "Configuration default-value cleanup and dtype inference logic are different issues in different modules.", "right": "pull_request:44883"}], "summary": "All suggested soft pairs are false positives. The cluster mixes unrelated PRs across different model fixes, CLI/generation refactors, kernel/integration changes, conversion mappings, and test-only edits; none appear to be the same underlying bug or change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44620|pull_request:45284", "pull_request:43995|pull_request:44414", "pull_request:44827|pull_request:45340", "pull_request:44126|pull_request:44417", "pull_request:43821|pull_request:44053", "pull_request:43889|pull_request:44634", "pull_request:44620|pull_request:44827", "pull_request:43500|pull_request:43558", "pull_request:43946|pull_request:44724", "pull_request:43592|pull_request:44883"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 21682, "estimated_input_tokens": 10713, "item_count": 18, "node_count": 18, "serialized_chars": 42852, "soft_pair_count": 11}, "cached_at": "2026-04-14T13:36:17Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "21de86e677d3fd9fcbd0614d4e0f4997b98d4495d28f9c981bd601f28dcd9fcb", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:41145", "pull_request:42781", "pull_request:43488", "pull_request:43558", "pull_request:43690", "pull_request:43702", 
"pull_request:43711", "pull_request:43942", "pull_request:44296", "pull_request:44390", "pull_request:44414", "pull_request:44760", "pull_request:44801", "pull_request:44859", "pull_request:44883", "pull_request:45170", "pull_request:45284", "pull_request:45350"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 44760 is the most substantial self-contained change, but it is only a representative by size/scope, not a duplicate target for the others.", "canonical_issue_reason": null, "canonical_pr_reason": "No single canonical PR fits the cluster: the PRs target different models or unrelated infrastructure/test changes.", "confidence": 0.94, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43488", "reason": "Both touch the ViT test file, but one is a repo-bot/version/dependency tweak and the other is a style-bot test edit; different intents and no shared bug/change.", "right": "pull_request:43558"}, {"accept": false, "left": "pull_request:43942", "reason": "A Bark test fix is unrelated to the broad auto-docstring config refactor.", "right": "pull_request:44296"}, {"accept": false, "left": "pull_request:41145", "reason": "These address different model work: Qwen3 rope kernel support vs an Isaac/Qwen3-backbone refactor; not the same code-path problem.", "right": "pull_request:44859"}, {"accept": false, "left": "pull_request:43711", "reason": "Scheduler initialization order and AMD-specific Qwen2 expectation updates are unrelated changes.", "right": "pull_request:45284"}, {"accept": false, "left": "pull_request:44414", "reason": "Model-loading tqdm verbosity is unrelated to dtype inference from a state dict.", "right": "pull_request:44883"}, {"accept": false, "left": "pull_request:44801", "reason": "Tokenizer-class hub fixes are unrelated to dtype guessing logic.", "right": "pull_request:44883"}, {"accept": false, "left": "pull_request:43690", "reason": "Notebook/Repl custom-model crash handling is a different bug from 
incorrect tokenizer class mapping on the hub.", "right": "pull_request:44801"}, {"accept": false, "left": "pull_request:44760", "reason": "Both add model support, but for different models and code paths (Mistral 4 vs Granite4Vision).", "right": "pull_request:45350"}, {"accept": false, "left": "pull_request:45170", "reason": "A LayerNorm rename/conversion mapping fix is unrelated to Qwen2 CI expectation values.", "right": "pull_request:45284"}, {"accept": false, "left": "pull_request:44390", "reason": "Different model additions (Nemotron-H vs Mistral 4); same broad category, but not the same concrete change.", "right": "pull_request:44760"}, {"accept": false, "left": "pull_request:42781", "reason": "Different model launches (VibeVoice vs Moonshine streaming) with separate implementation and docs.", "right": "pull_request:43702"}], "summary": "This cluster is a loose set of unrelated pull requests: model additions, test-only tweaks, and small framework fixes. None of the soft edges look like the same underlying change, so they should all stay separate."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43488|pull_request:43558", "pull_request:43942|pull_request:44296", "pull_request:41145|pull_request:44859", "pull_request:43711|pull_request:45284", "pull_request:44414|pull_request:44883", "pull_request:44801|pull_request:44883", "pull_request:43690|pull_request:44801", "pull_request:44760|pull_request:45350", "pull_request:45170|pull_request:45284", "pull_request:44390|pull_request:44760", "pull_request:42781|pull_request:43702"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 21392, "estimated_input_tokens": 10568, "item_count": 18, "node_count": 18, "serialized_chars": 42269, "soft_pair_count": 10}, "cached_at": "2026-04-14T13:36:40Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", 
"evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "572d57848873e32a57aec5e30b99eb159dbed1c47474addca0edf3b323be14ff", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43251", "pull_request:43558", "pull_request:43565", "pull_request:43588", "pull_request:43702", "pull_request:43712", "pull_request:43838", "pull_request:43995", "pull_request:44125", "pull_request:44293", "pull_request:44320", "pull_request:44390", "pull_request:44827", "pull_request:44883", "pull_request:44990", "pull_request:45139", "pull_request:45155", "pull_request:45170"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 44320 is the most substantial and self-contained change in the set, but it is only a representative artifact, not a duplicate target for the others.", "canonical_issue_reason": null, "canonical_pr_reason": "No single canonical PR for deduplication: the set is heterogeneous, and the PRs do not converge on one underlying fix or feature.", "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43702", "reason": "Both are model additions, but they add different models (Moonshine Streaming vs SAM3-LiteText) with different codepaths and files.", "right": "pull_request:44320"}, {"accept": false, "left": "pull_request:44125", "reason": "Video processor loading errors and PEFT adapter TP loading are unrelated fixes touching different subsystems.", "right": "pull_request:45155"}, {"accept": false, "left": "pull_request:44990", "reason": "Both mention vLLM-related cleanup, but one is a deprecation shim and the other is RoPE/cache/export compatibility changes; not the same bug.", "right": "pull_request:45139"}, {"accept": false, "left": "pull_request:43838", "reason": "These are separate model-support PRs for different architectures (Qwen3-ASR vs 
Nemotron H).", "right": "pull_request:44390"}, {"accept": false, "left": "pull_request:43251", "reason": "Cross-entropy kwargs passthrough and dtype guessing from state dict are distinct utility fixes with no shared codepath.", "right": "pull_request:44883"}, {"accept": false, "left": "pull_request:44827", "reason": "Mistral4 test fixes and layernorm renaming/conversion mapping are unrelated changes.", "right": "pull_request:45170"}, {"accept": false, "left": "pull_request:43712", "reason": "General model-property typing cleanup and Falcon output-interface refactoring are different refactors affecting different areas.", "right": "pull_request:43995"}, {"accept": false, "left": "pull_request:43251", "reason": "Loss utility keyword passthrough and Qwen3 Omni video feature return-type fix are unrelated.", "right": "pull_request:43588"}, {"accept": false, "left": "pull_request:44293", "reason": "Strict export cleanup and CLIP-like layrnorm-to-layernorm mapping are separate maintenance changes.", "right": "pull_request:45170"}, {"accept": false, "left": "pull_request:43558", "reason": "The style-bot noise PR and the repo-consistency bot workflow fix are not the same change, even though both touch CI/workflows.", "right": "pull_request:43565"}], "summary": "This cluster is mostly a mix of unrelated pull requests: model additions, workflow/docs changes, and small bugfixes. 
Despite some superficial similarity in titles or shared subsystems, none of the soft-edge pairs look like the same underlying change or bug."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43702|pull_request:44320", "pull_request:44125|pull_request:45155", "pull_request:44990|pull_request:45139", "pull_request:43838|pull_request:44390", "pull_request:43251|pull_request:44883", "pull_request:44827|pull_request:45170", "pull_request:43712|pull_request:43995", "pull_request:43251|pull_request:43588", "pull_request:44293|pull_request:45170", "pull_request:43558|pull_request:43565"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 19230, "estimated_input_tokens": 9487, "item_count": 18, "node_count": 18, "serialized_chars": 37948, "soft_pair_count": 10}, "cached_at": "2026-04-14T13:37:03Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "b2985f5df5108e59c67fe8d095ccf65381f4b596391871f7fe72c83e85e7376c", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:40755", "pull_request:42993", "pull_request:43385", "pull_request:43553", "pull_request:43555", "pull_request:43558", "pull_request:43579", "pull_request:43778", "pull_request:43823", "pull_request:43858", "pull_request:43942", "pull_request:43948", "pull_request:43995", "pull_request:44417", "pull_request:44827", "pull_request:44883", "pull_request:45044", "pull_request:45285"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "No global best PR is suitable as a representative duplicate: the set is heterogeneous, so any choice would be arbitrary rather than canonical.", 
"canonical_issue_reason": null, "canonical_pr_reason": "No single canonical PR exists here: the items address different models and subsystems, with no shared concrete bug or implementation target.", "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43995", "reason": "Falcon output-collection refactor vs Janus image-generation bug fix; different models and different code paths.", "right": "pull_request:45044"}, {"accept": false, "left": "pull_request:43553", "reason": "Workflow permission setup vs a ViT test-style change; unrelated artifacts and not the same underlying fix.", "right": "pull_request:43558"}, {"accept": false, "left": "pull_request:43555", "reason": "Style bot permission tweak in `.github/workflows/pr-style-bot.yml` vs a ViT test file change; no shared bug/change.", "right": "pull_request:43558"}, {"accept": false, "left": "pull_request:43942", "reason": "Bark test fix vs Emu3 image processing bug fix; both are test/model maintenance but not the same issue.", "right": "pull_request:43948"}, {"accept": false, "left": "pull_request:43778", "reason": "Mamba/FalconMamba weight initialization changes vs adding MobileLLM support; different model families and goals.", "right": "pull_request:43823"}, {"accept": false, "left": "pull_request:43778", "reason": "State-space weight init change vs dtype-guessing fix in `modeling_utils.py`; unrelated code paths.", "right": "pull_request:44883"}, {"accept": false, "left": "pull_request:42993", "reason": "Benchmark_v2 XPU platform support vs Solar Open test XPU expectations; related to XPU only at a broad level, not the same concrete change.", "right": "pull_request:43579"}, {"accept": false, "left": "pull_request:44417", "reason": "Neuron kernel support vs Mistral4 test/bug work; different functionality and no shared implementation target.", "right": "pull_request:44827"}, {"accept": false, "left": "pull_request:43858", "reason": "New GlmMoeDsa model support vs Gemma4 export/cache 
initialization work; separate models and separate subsystems.", "right": "pull_request:45285"}, {"accept": false, "left": "pull_request:40755", "reason": "TimesFM covariate forecasting feature vs UVDoc model support; both are model additions, but they are entirely different changes.", "right": "pull_request:43385"}], "summary": "This cluster is not a true duplicate set; it contains unrelated PRs spanning model additions, bug fixes, benchmarks, docs, tests, and workflow changes. The soft links are mostly superficial similarity on model/test file names or generic refactors."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43995|pull_request:45044", "pull_request:43553|pull_request:43558", "pull_request:43555|pull_request:43558", "pull_request:43942|pull_request:43948", "pull_request:43778|pull_request:43823", "pull_request:43778|pull_request:44883", "pull_request:42993|pull_request:43579", "pull_request:44417|pull_request:44827", "pull_request:43858|pull_request:45285", "pull_request:40755|pull_request:43385"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 22592, "estimated_input_tokens": 11168, "item_count": 18, "node_count": 18, "serialized_chars": 44669, "soft_pair_count": 11}, "cached_at": "2026-04-14T13:37:29Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "79129fde30f5ee3e9a59d2239ff381a23efca9d49599f456fe2055f4a23381ff", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:40755", "pull_request:41145", "pull_request:41224", "pull_request:41272", "pull_request:43702", "pull_request:43707", "pull_request:43946", "pull_request:43948", "pull_request:43955", 
"pull_request:44051", "pull_request:44390", "pull_request:44502", "pull_request:44544", "pull_request:44808", "pull_request:44827", "pull_request:45213", "pull_request:45261", "pull_request:45284"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "If a representative PR is needed, #44808 is the strongest standalone candidate because it is a broad, self-contained model-support addition with docs, auto-class wiring, model code, and tests. It is not a duplicate of the others, though.", "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:40755", "reason": "Different model features in different subsystems: TimesFM covariates vs Moonshine streaming. Not the same bug/change.", "right": "pull_request:43707"}, {"accept": false, "left": "pull_request:40755", "reason": "TimesFM forecasting covariates and DINOv3 image classification are unrelated model additions.", "right": "pull_request:41224"}, {"accept": false, "left": "pull_request:40755", "reason": "TimesFM covariates vs Qwen3 rope kernel support; different models and code paths.", "right": "pull_request:41145"}, {"accept": false, "left": "pull_request:44390", "reason": "Both are model-support PRs, but for entirely different models and tasks (NemotronH vs PP-OCRv5 text recognition).", "right": "pull_request:44808"}, {"accept": false, "left": "pull_request:45261", "reason": "Unrelated fixes: CircleCI workflow null-handling vs Qwen2 AMD expectation updates.", "right": "pull_request:45284"}, {"accept": false, "left": "pull_request:44051", "reason": "Gemma3n forward type-hint cleanup is unrelated to Mistral4 test fixes.", "right": "pull_request:44827"}, {"accept": false, "left": "pull_request:43946", "reason": "Dockerfile install-source changes are unrelated to kernel version mapping logic.", "right": "pull_request:43955"}, {"accept": false, "left": "pull_request:43948", "reason": "Image-token counting fix vs 
Mistral4 test adjustments; not the same underlying problem.", "right": "pull_request:44827"}, {"accept": false, "left": "pull_request:44502", "reason": "Type-checker fix in import utilities is unrelated to the model-creation skill content.", "right": "pull_request:45213"}, {"accept": false, "left": "pull_request:41272", "reason": "Both add models, but they are different model families with separate architectures and code paths.", "right": "pull_request:43702"}, {"accept": false, "left": "pull_request:44544", "reason": "ANSI styling fix in loading reports is unrelated to the model-creation skill PR.", "right": "pull_request:45213"}], "summary": "The set is highly heterogeneous: mostly unrelated model-addition PRs plus a few small bugfix/configuration PRs. None of the proposed soft edges look like the same underlying change or bug, so no duplicates are accepted."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:40755|pull_request:43707", "pull_request:40755|pull_request:41224", "pull_request:40755|pull_request:41145", "pull_request:44390|pull_request:44808", "pull_request:45261|pull_request:45284", "pull_request:44051|pull_request:44827", "pull_request:43946|pull_request:43955", "pull_request:43948|pull_request:44827", "pull_request:44502|pull_request:45213", "pull_request:41272|pull_request:43702", "pull_request:44544|pull_request:45213"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 22042, "estimated_input_tokens": 10893, "item_count": 18, "node_count": 18, "serialized_chars": 43571, "soft_pair_count": 18}, "cached_at": "2026-04-14T13:37:59Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", 
"prepared_review_unit_hash": "77428050369ed6c1157625d196b78003785dd315a2e931dcfcedd295e64926b0", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:39895", "pull_request:41763", "pull_request:43424", "pull_request:43486", "pull_request:44386", "pull_request:44395", "pull_request:44400", "pull_request:44413", "pull_request:44542", "pull_request:44634", "pull_request:44739", "pull_request:45187", "pull_request:45213", "pull_request:45283", "pull_request:45285", "pull_request:45298", "pull_request:45394", "pull_request:45429"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 45285 is the strongest standalone representative: it is merged, substantive, and has a coherent export/cache fix with integration coverage. It still does not match the rest of the cluster as a duplicate.", "canonical_issue_reason": null, "canonical_pr_reason": "No single PR captures a shared underlying change; the items span unrelated subsystems and model families.", "confidence": 0.95, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:39895", "reason": "Videoprism model addition vs Executorch dynamic-shape export test; unrelated features.", "right": "pull_request:43424"}, {"accept": false, "left": "pull_request:44739", "reason": "Janus test/processer tweak vs X-CLIP processor/test fix; different models and failure modes.", "right": "pull_request:45394"}, {"accept": false, "left": "pull_request:41763", "reason": "Both are model-addition PRs, but for different models (TimesFM 2.5 vs Qwen2.5-VL) and different code paths.", "right": "pull_request:45298"}, {"accept": false, "left": "pull_request:43486", "reason": "Video utility bug fix vs model-creation skill/docs content; no shared underlying bug or change.", "right": "pull_request:45213"}, {"accept": false, "left": "pull_request:41763", "reason": "TimesFM 2.5 integration vs Qwen3.5 GGUF loading support; different model families and purposes.", "right": "pull_request:45283"}, {"accept": 
false, "left": "pull_request:44386", "reason": "Sampling logic change for Higgs Audio V2 vs backend dependency annotation for its tokenizer; not the same concrete fix.", "right": "pull_request:44542"}, {"accept": false, "left": "pull_request:44542", "reason": "Tokenizer backend dependency fix vs Gemma4 export/cache fix; unrelated changes.", "right": "pull_request:45285"}, {"accept": false, "left": "pull_request:44400", "reason": "Loading-report formatting/terminal check fix vs tokenizer dependency fix; separate concerns.", "right": "pull_request:44542"}, {"accept": false, "left": "pull_request:45187", "reason": "File-handle cleanup in serving utils vs CI workflow tweak; unrelated code paths.", "right": "pull_request:45429"}, {"accept": false, "left": "pull_request:44395", "reason": "Kernel-loading security hardening vs PEFT conversion mapping fix; different subsystems and bugs.", "right": "pull_request:44413"}, {"accept": false, "left": "pull_request:44395", "reason": "Kernel security policy change vs LFM2 kernel-path cache fix; not the same issue.", "right": "pull_request:44634"}, {"accept": false, "left": "pull_request:44413", "reason": "PEFT mapping bug fix vs LFM2 kernel-path bug fix; no overlap in concrete change.", "right": "pull_request:44634"}, {"accept": false, "left": "pull_request:44386", "reason": "Audio sampling fix vs kernel security hardening; unrelated domains.", "right": "pull_request:44395"}, {"accept": false, "left": "pull_request:44386", "reason": "Higgs Audio sampling fix vs PEFT conversion mapping fix; different code paths.", "right": "pull_request:44413"}, {"accept": false, "left": "pull_request:44386", "reason": "Higgs Audio sampling logic vs LFM2 kernel-path cache logic; no shared underlying bug.", "right": "pull_request:44634"}, {"accept": false, "left": "pull_request:44395", "reason": "Kernel-loading security change vs Gemma4 export/cache support; unrelated.", "right": "pull_request:45285"}, {"accept": false, "left": "pull_request:44413", 
"reason": "PEFT mapping fix vs Gemma4 export/cache support; different model/integration areas.", "right": "pull_request:45285"}, {"accept": false, "left": "pull_request:44634", "reason": "LFM2 kernel-path fix vs Gemma4 export/cache support; distinct bugs and code paths.", "right": "pull_request:45285"}], "summary": "This cluster is a mixed bag of unrelated pull requests: model additions, small bug fixes, docs changes, security/workflow edits, and export/test tweaks. None of the soft-similarity pairs look like the same underlying bug or change, so no duplicate merge is justified."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:39895|pull_request:43424", "pull_request:44739|pull_request:45394", "pull_request:41763|pull_request:45298", "pull_request:43486|pull_request:45213", "pull_request:41763|pull_request:45283", "pull_request:44386|pull_request:44542", "pull_request:44542|pull_request:45285", "pull_request:44400|pull_request:44542", "pull_request:45187|pull_request:45429", "pull_request:44395|pull_request:44413", "pull_request:44395|pull_request:44634", "pull_request:44413|pull_request:44634", "pull_request:44386|pull_request:44395", "pull_request:44386|pull_request:44413", "pull_request:44386|pull_request:44634", "pull_request:44395|pull_request:45285", "pull_request:44413|pull_request:45285", "pull_request:44634|pull_request:45285"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 23634, "estimated_input_tokens": 11689, "item_count": 17, "node_count": 17, "serialized_chars": 46755, "soft_pair_count": 21}, "cached_at": "2026-04-14T13:38:21Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", 
"prepared_review_unit_hash": "c9bf8c233f87d5a7202e7e4eb4d84d555b957a77bbc12fea0ad9beff7c87c42f", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:42848", "pull_request:43173", "pull_request:43623", "pull_request:43838", "pull_request:43858", "pull_request:43973", "pull_request:43995", "pull_request:44395", "pull_request:44413", "pull_request:44542", "pull_request:44633", "pull_request:44760", "pull_request:44808", "pull_request:44827", "pull_request:44866", "pull_request:44972", "pull_request:45213"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "#44760 is the best general PR candidate: it is merged, relatively complete, and covers a coherent end-to-end feature addition rather than a narrow test tweak or partial scaffold.", "canonical_issue_reason": null, "canonical_pr_reason": "No issue artifacts are present; among the PRs, #44760 is the strongest standalone representative because it is a fully integrated model-addition PR with code, docs, auto-registry updates, and tests.", "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43838", "reason": "Both are model-addition PRs touching Auto registries, but they add different models (Qwen3-ASR vs Mistral 4) with different code paths and no shared underlying defect.", "right": "pull_request:44760"}, {"accept": false, "left": "pull_request:43858", "reason": "One adds a new GLM model doc/code package; the other is a small MedASR documentation update. 
Similar area, but clearly different changes.", "right": "pull_request:44633"}, {"accept": false, "left": "pull_request:43623", "reason": "Zamba weight-tying fix vs adding GLM docs/model support are unrelated changes; no common bug or mergeable combined fix.", "right": "pull_request:43858"}, {"accept": false, "left": "pull_request:43173", "reason": "A flaky-test tolerance change is not the same change as introducing a new GLM model implementation and docs.", "right": "pull_request:43858"}, {"accept": false, "left": "pull_request:42848", "reason": "Broad attention-mask interface refactor across many models is unrelated to adding GLM MoE-DSA support.", "right": "pull_request:43858"}, {"accept": false, "left": "pull_request:43858", "reason": "New GLM model support and LFM2 cache-alignment are separate model implementations with different code paths.", "right": "pull_request:44866"}, {"accept": false, "left": "pull_request:43858", "reason": "GLM model addition and AMD CI expectation updates are different kinds of changes; the latter is test-data maintenance only.", "right": "pull_request:44972"}, {"accept": false, "left": "pull_request:43858", "reason": "GLM model support and a backend-dependency annotation fix for Higgs Audio tokenizer are unrelated.", "right": "pull_request:44542"}, {"accept": false, "left": "pull_request:44542", "reason": "Backend dependency fix vs MedASR docs update are not the same bug/change, just both small maintenance PRs.", "right": "pull_request:44633"}, {"accept": false, "left": "pull_request:43623", "reason": "Zamba weight-tying regex fix and backend-dependency annotation are unrelated maintenance fixes.", "right": "pull_request:44542"}, {"accept": false, "left": "pull_request:43623", "reason": "Zamba state-dict naming fix and MedASR documentation update do not address the same issue.", "right": "pull_request:44633"}, {"accept": false, "left": "pull_request:43173", "reason": "Flaky test tolerance and backend dependency decoration are 
different concerns with no shared code-path bug.", "right": "pull_request:44542"}, {"accept": false, "left": "pull_request:43173", "reason": "A test tolerance bump is not the same underlying change as a documentation edit.", "right": "pull_request:44633"}, {"accept": false, "left": "pull_request:43995", "reason": "Falcon output-collection refactor and a model-creation skill scaffold are unrelated; the latter is tooling/docs, not model code.", "right": "pull_request:45213"}, {"accept": false, "left": "pull_request:43858", "reason": "GLM model addition and kernels security hardening are completely different features with no shared code-path problem.", "right": "pull_request:44395"}, {"accept": false, "left": "pull_request:44395", "reason": "Kernel-loading security changes and a LASR doc update are unrelated.", "right": "pull_request:44633"}, {"accept": false, "left": "pull_request:44413", "reason": "PEFT mapping fix and MedASR documentation update do not target the same defect.", "right": "pull_request:44633"}, {"accept": false, "left": "pull_request:43623", "reason": "Zamba tied-weights fix and kernels security mitigation are unrelated changes.", "right": "pull_request:44395"}, {"accept": false, "left": "pull_request:43623", "reason": "Zamba weight-tying regex fix and PEFT conversion mapping fix are different code paths and different failure modes.", "right": "pull_request:44413"}, {"accept": false, "left": "pull_request:43973", "reason": "Both add model support and touch Auto/docs plumbing, but they are for different model families (LFM2 audio vs PP-OCRv5) and not the same concrete change.", "right": "pull_request:44808"}, {"accept": false, "left": "pull_request:44827", "reason": "Mistral4 test fixes and a new-model creation skill are unrelated; one is code/test maintenance, the other is repository tooling.", "right": "pull_request:45213"}], "summary": "The cluster is a loose bundle of unrelated feature PRs (new model support, docs additions, test tolerance tweaks, 
and small refactors). The soft edges are driven mostly by shared boilerplate like Auto* registration, docs TOC edits, or broad model-family work, not the same underlying bug/change. I would not merge any of the PR pairs."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43838|pull_request:44760", "pull_request:43858|pull_request:44633", "pull_request:43623|pull_request:43858", "pull_request:43173|pull_request:43858", "pull_request:42848|pull_request:43858", "pull_request:43858|pull_request:44866", "pull_request:43858|pull_request:44972", "pull_request:43858|pull_request:44542", "pull_request:44542|pull_request:44633", "pull_request:43623|pull_request:44542", "pull_request:43623|pull_request:44633", "pull_request:43173|pull_request:44542", "pull_request:43173|pull_request:44633", "pull_request:43995|pull_request:45213", "pull_request:43858|pull_request:44395", "pull_request:44395|pull_request:44633", "pull_request:44413|pull_request:44633", "pull_request:43623|pull_request:44395", "pull_request:43623|pull_request:44413", "pull_request:43973|pull_request:44808", "pull_request:44827|pull_request:45213"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 22000, "estimated_input_tokens": 10872, "item_count": 18, "node_count": 18, "serialized_chars": 43488, "soft_pair_count": 15}, "cached_at": "2026-04-14T13:38:41Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "73204959310916f93a7028939abacd54fa4aca8eab7e29e9bb16602106449ed6", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:40820", "pull_request:43178", "pull_request:43385", "pull_request:43498", "pull_request:43649", 
"pull_request:43677", "pull_request:43707", "pull_request:43936", "pull_request:44390", "pull_request:44739", "pull_request:44770", "pull_request:45157", "pull_request:45286", "pull_request:45287", "pull_request:45288", "pull_request:45298", "pull_request:45403", "pull_request:45410"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "No PR is a strong global representative because there is no shared underlying code-path change across the cluster.", "canonical_issue_reason": null, "canonical_pr_reason": "No single canonical PR fits this cluster; the items are not duplicates and span different subsystems, models, and purposes.", "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44770", "reason": "Unrelated fixes: strict config handling vs AltCLIP test/device compatibility; different models and code paths.", "right": "pull_request:45410"}, {"accept": false, "left": "pull_request:44770", "reason": "Different scopes: config strictness cleanup vs Nomic BERT xpu test expectations.", "right": "pull_request:45286"}, {"accept": false, "left": "pull_request:44770", "reason": "Different scopes: config strictness cleanup vs VideoMT xpu test expectations.", "right": "pull_request:45287"}, {"accept": false, "left": "pull_request:44770", "reason": "Different scopes: config strictness cleanup vs Cohere ASR xpu test expectations.", "right": "pull_request:45288"}, {"accept": false, "left": "pull_request:43385", "reason": "Both add model support, but for unrelated models and codebases (UVDoc vs PrismML Bonsai).", "right": "pull_request:45157"}, {"accept": false, "left": "pull_request:43707", "reason": "Unrelated model additions for different architectures; not the same underlying change.", "right": "pull_request:45157"}, {"accept": false, "left": "pull_request:43178", "reason": "Benchmark model additions are unrelated to AltCLIP test/device fixes.", "right": "pull_request:45410"}, {"accept": false, "left": 
"pull_request:43178", "reason": "Benchmark framework/model additions do not match Nomic BERT test-only fixes.", "right": "pull_request:45286"}, {"accept": false, "left": "pull_request:43178", "reason": "Benchmark model work and VideoMT test expectation changes are different changes.", "right": "pull_request:45287"}, {"accept": false, "left": "pull_request:43178", "reason": "Benchmark additions are unrelated to Cohere ASR test expectation updates.", "right": "pull_request:45288"}, {"accept": false, "left": "pull_request:43178", "reason": "Benchmark additions and a common test helper refactor are not the same concrete bug/change.", "right": "pull_request:45403"}, {"accept": false, "left": "pull_request:43936", "reason": "Both fix tests, but for different models and distinct failure modes; not mergeable as one PR.", "right": "pull_request:44739"}, {"accept": false, "left": "pull_request:43498", "reason": "Backward-compatibility aliasing for tie_weights is unrelated to the video frame/fps parameter handling fix.", "right": "pull_request:43677"}, {"accept": false, "left": "pull_request:40820", "reason": "Benchmark additions vs Nemotron support are different features with no shared code-path problem.", "right": "pull_request:44390"}, {"accept": false, "left": "pull_request:43649", "reason": "CI workflow changes are unrelated to adding a new Qwen2.5-VL model implementation.", "right": "pull_request:45298"}], "summary": "This cluster is heterogeneous: it mixes unrelated model additions, benchmark work, CI/workflow tweaks, and isolated test or compatibility fixes. 
The soft-similarity pairs are mostly spurious and do not indicate the same underlying bug or change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44770|pull_request:45410", "pull_request:44770|pull_request:45286", "pull_request:44770|pull_request:45287", "pull_request:44770|pull_request:45288", "pull_request:43385|pull_request:45157", "pull_request:43707|pull_request:45157", "pull_request:43178|pull_request:45410", "pull_request:43178|pull_request:45286", "pull_request:43178|pull_request:45287", "pull_request:43178|pull_request:45288", "pull_request:43178|pull_request:45403", "pull_request:43936|pull_request:44739", "pull_request:43498|pull_request:43677", "pull_request:40820|pull_request:44390", "pull_request:43649|pull_request:45298"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 14572, "estimated_input_tokens": 7158, "item_count": 18, "node_count": 18, "serialized_chars": 28632, "soft_pair_count": 17}, "cached_at": "2026-04-14T13:39:07Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "aac0f53f10c1a4ea6779a017ba9df236d098120f30b527b1f1500c4e9acc37e6", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:40755", "pull_request:41145", "pull_request:41224", "pull_request:41722", "pull_request:42993", "pull_request:43385", "pull_request:43465", "pull_request:43627", "pull_request:43707", "pull_request:43838", "pull_request:43902", "pull_request:44053", "pull_request:44413", "pull_request:44571", "pull_request:44760", "pull_request:45139", "pull_request:45283", "pull_request:45298"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "No 
PR is a good global representative because none of the candidate pairs describe the same concrete fix or mergeable change.", "canonical_issue_reason": null, "canonical_pr_reason": "No single PR is a duplicate anchor; the items span unrelated docs, model-support, benchmark, and conversion changes.", "confidence": 0.98, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43902", "reason": "Unrelated doc-localization cleanup vs RoPE/vLLM-related code/docs changes; no shared bug or change.", "right": "pull_request:45139"}, {"accept": false, "left": "pull_request:41722", "reason": "Tamil README documentation vs Qwen3-ASR model support; completely different artifacts.", "right": "pull_request:43838"}, {"accept": false, "left": "pull_request:40755", "reason": "TimesFM covariate forecasting vs Intel XPU benchmark support are unrelated changes.", "right": "pull_request:42993"}, {"accept": false, "left": "pull_request:43465", "reason": "Both mention conversion, but they fix different code paths: GGUF recent conversion issues vs Pegasus tokenizer/conversion fixes.", "right": "pull_request:44571"}, {"accept": false, "left": "pull_request:43465", "reason": "GGUF conversion issues and PEFT conversion typo are different integrations and different bugs.", "right": "pull_request:44053"}, {"accept": false, "left": "pull_request:45283", "reason": "Qwen3.5 GGUF loading support vs a new Qwen2.5-VL model/docs addition; unrelated changes.", "right": "pull_request:45298"}, {"accept": false, "left": "pull_request:44413", "reason": "PEFT mapping fix and Pegasus conversion fix are separate conversion problems in different subsystems.", "right": "pull_request:44571"}, {"accept": false, "left": "pull_request:43465", "reason": "GGUF tokenizer/loading fixes vs PEFT mapping fixes; not the same underlying issue.", "right": "pull_request:44413"}, {"accept": false, "left": "pull_request:41224", "reason": "Both add model support, but for different models and different code paths; not 
mergeable as one fix.", "right": "pull_request:43385"}, {"accept": false, "left": "pull_request:41224", "reason": "DINOv3ViT image-classification support vs SLANeXt model support are unrelated model additions.", "right": "pull_request:43707"}, {"accept": false, "left": "pull_request:42993", "reason": "Benchmark XPU support vs UVDoc model support are unrelated changes.", "right": "pull_request:43385"}, {"accept": false, "left": "pull_request:43385", "reason": "UVDoc model support and Qwen3.5 GGUF loading support address different models and code paths.", "right": "pull_request:45283"}, {"accept": false, "left": "pull_request:41224", "reason": "Image-classification model support vs benchmark hardware support are unrelated.", "right": "pull_request:42993"}, {"accept": false, "left": "pull_request:43385", "reason": "Different model support PRs (UVDoc vs Mistral4); same broad area, but not the same underlying change.", "right": "pull_request:44760"}, {"accept": false, "left": "pull_request:41224", "reason": "DINOv3ViT support and Mistral4 support are separate model additions.", "right": "pull_request:44760"}, {"accept": false, "left": "pull_request:41145", "reason": "Qwen3 rope-kernel support and DINOv3ViT classification support are unrelated.", "right": "pull_request:41224"}, {"accept": false, "left": "pull_request:43627", "reason": "A standalone scratch notebook is unrelated to Qwen2.5-VL model support.", "right": "pull_request:45298"}], "summary": "This cluster is not a duplicate set; it mixes unrelated documentation, model-support, conversion, and benchmark changes. 
None of the soft edges look like the same underlying bug or change, so all should be rejected."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43902|pull_request:45139", "pull_request:41722|pull_request:43838", "pull_request:40755|pull_request:42993", "pull_request:43465|pull_request:44571", "pull_request:43465|pull_request:44053", "pull_request:45283|pull_request:45298", "pull_request:44413|pull_request:44571", "pull_request:43465|pull_request:44413", "pull_request:41224|pull_request:43385", "pull_request:41224|pull_request:43707", "pull_request:42993|pull_request:43385", "pull_request:43385|pull_request:45283", "pull_request:41224|pull_request:42993", "pull_request:43385|pull_request:44760", "pull_request:41224|pull_request:44760", "pull_request:41145|pull_request:41224", "pull_request:43627|pull_request:45298"], "split": true, "trimmed": true} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 22620, "estimated_input_tokens": 11182, "item_count": 18, "node_count": 18, "serialized_chars": 44726, "soft_pair_count": 13}, "cached_at": "2026-04-14T13:39:33Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "1e526328be3ae1043331abfada923e5e64b432050a2a7105de46c109ad00b27f", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:41061", "pull_request:41272", "pull_request:42227", "pull_request:43385", "pull_request:43527", "pull_request:43558", "pull_request:43647", "pull_request:43648", "pull_request:43649", "pull_request:43677", "pull_request:43707", "pull_request:43946", "pull_request:44055", "pull_request:44152", "pull_request:44319", "pull_request:44357", "pull_request:44395", "pull_request:44994"], 
"result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "#43385 is the best global representative among the listed PRs: it is merged, broad in scope, and includes full model support plus documentation and tests, making it the most complete standalone change in the set.", "canonical_issue_reason": null, "canonical_pr_reason": "No single canonical PR emerges from this cluster because the items are unrelated changes rather than variants of one fix; if a representative is needed, #43385 is the strongest general anchor because it is a merged, substantial model-support PR with docs/tests and very high review activity.", "confidence": 0.98, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:42227", "reason": "Whisper return-language handling vs BEiT/DPT image-label fixing are unrelated code paths and different subsystems.", "right": "pull_request:43527"}, {"accept": false, "left": "pull_request:41272", "reason": "HRM model addition and UVDoc model support are separate model integrations, not the same bug or change.", "right": "pull_request:43385"}, {"accept": false, "left": "pull_request:41272", "reason": "HRM and SLANeXt are distinct model-support PRs with different files, tests, and integration work.", "right": "pull_request:43707"}, {"accept": false, "left": "pull_request:43946", "reason": "Dockerfile package changes and the unprotected torch import fix are unrelated maintenance changes.", "right": "pull_request:44055"}, {"accept": false, "left": "pull_request:41272", "reason": "HRM model support and Sarvam model addition are different feature PRs with no shared concrete fix.", "right": "pull_request:44994"}, {"accept": false, "left": "pull_request:43647", "reason": "These both touch CI/reporting, but the diffs indicate different workflow/reporting changes rather than one identical underlying fix.", "right": "pull_request:43649"}, {"accept": false, "left": "pull_request:43648", "reason": "Shared CI files are not enough here; one PR is a 
workflow tweak and the other is a broader failure-reporting change.", "right": "pull_request:43649"}, {"accept": false, "left": "pull_request:41061", "reason": "Parakeet ASR support and grouped_mm autograd support are unrelated features in different parts of the library.", "right": "pull_request:44152"}, {"accept": false, "left": "pull_request:41061", "reason": "Parakeet model support and non-gated expert support in MoE are different changes with no shared code path.", "right": "pull_request:44319"}, {"accept": false, "left": "pull_request:43677", "reason": "A multimodal kwargs fix in processing_utils and a RoPE initialization fix are unrelated bugs.", "right": "pull_request:44357"}, {"accept": false, "left": "pull_request:43558", "reason": "A style-only test edit and a self-comment CI workflow change are not the same underlying issue.", "right": "pull_request:43647"}, {"accept": false, "left": "pull_request:43558", "reason": "Style-bot noise in a test file does not match the CI workflow/reporting changes in the other PR.", "right": "pull_request:43648"}, {"accept": false, "left": "pull_request:43677", "reason": "Video-kwargs handling and kernel security hardening are separate concerns with no overlapping fix objective.", "right": "pull_request:44395"}], "summary": "The cluster is heterogeneous: it mixes unrelated model-addition PRs, workflow cleanup, small bug fixes, and doc/security changes. 
None of the soft pairs look like the same underlying change, so no duplicates should be merged here."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:42227|pull_request:43527", "pull_request:41272|pull_request:43385", "pull_request:41272|pull_request:43707", "pull_request:43946|pull_request:44055", "pull_request:41272|pull_request:44994", "pull_request:43647|pull_request:43649", "pull_request:43648|pull_request:43649", "pull_request:41061|pull_request:44152", "pull_request:41061|pull_request:44319", "pull_request:43677|pull_request:44357", "pull_request:43558|pull_request:43647", "pull_request:43558|pull_request:43648", "pull_request:43677|pull_request:44395"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 16590, "estimated_input_tokens": 8167, "item_count": 18, "node_count": 18, "serialized_chars": 32665, "soft_pair_count": 22}, "cached_at": "2026-04-14T13:40:02Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "9abd4a0f2ec5488a78d438b102be3b0ef601ad5a81e432a3387b8f1479166434", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:40294", "pull_request:41061", "pull_request:43098", "pull_request:43247", "pull_request:43274", "pull_request:43291", "pull_request:43385", "pull_request:43498", "pull_request:43627", "pull_request:43647", "pull_request:43648", "pull_request:43707", "pull_request:43884", "pull_request:44034", "pull_request:44308", "pull_request:44994", "pull_request:45139", "pull_request:45286"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 43098 is the best overall representative because it is a substantial but 
conventional model-support addition, clearly scoped and merged, with strong review activity and the usual implementation surface.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 43098 is a clean, merged single-model support change with a clear title and the standard docs/auto-registry footprint, making it the most representative PR in this mixed cluster.", "confidence": 0.98, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43647", "reason": "Different changes: one edits self-comment CI/workflows, the other is a vit test tweak; same file overlap is incidental.", "right": "pull_request:43884"}, {"accept": false, "left": "pull_request:43647", "reason": "Different test/workflow modifications in separate model test files; no shared bug or concrete fix path.", "right": "pull_request:44034"}, {"accept": false, "left": "pull_request:43648", "reason": "One PR changes workflow/notification utilities plus tests, the other is only a vit test change; not the same underlying issue.", "right": "pull_request:43884"}, {"accept": false, "left": "pull_request:43648", "reason": "Overlap is only broad test/workflow plumbing; one targets clip/vit tests and utilities, the other is a vit-only test tweak.", "right": "pull_request:44034"}, {"accept": false, "left": "pull_request:41061", "reason": "Both add different model support (Parakeet/TDT vs UVDoc) with different files and model-specific code paths.", "right": "pull_request:43385"}, {"accept": false, "left": "pull_request:41061", "reason": "Both are separate model additions with different architecture/codegen changes; shared registry/docs files are generic boilerplate.", "right": "pull_request:43707"}, {"accept": false, "left": "pull_request:43498", "reason": "Different concerns: a backward-compatibility/tie_weights tweak versus rope/CI-related fixes; not the same code-path problem.", "right": "pull_request:45139"}, {"accept": false, "left": "pull_request:40294", "reason": "Separate model-support PRs for 
Higgs Audio V2 and UVDoc; same general pattern, but different models and implementations.", "right": "pull_request:43385"}, {"accept": false, "left": "pull_request:40294", "reason": "Different model support for Higgs Audio V2 versus SLANeXt; shared auto/docs files are generic registry updates.", "right": "pull_request:43707"}, {"accept": false, "left": "pull_request:43385", "reason": "UVDoc and SAM3-LiteText are different models with different model classes/configs; no plausible single merged fix.", "right": "pull_request:44308"}, {"accept": false, "left": "pull_request:43707", "reason": "Distinct model additions (SLANeXt vs SAM3-LiteText) affecting different implementations and docs.", "right": "pull_request:44308"}, {"accept": false, "left": "pull_request:43098", "reason": "PP-DocLayoutV3 support and Sarvam model support are unrelated model additions; no shared bug or feature change.", "right": "pull_request:44994"}, {"accept": false, "left": "pull_request:43247", "reason": "PP-OCRv5_mobile_det and Sarvam are different model families; the overlap is only generic model-registry work.", "right": "pull_request:44994"}, {"accept": false, "left": "pull_request:43274", "reason": "PP-OCRv5_server_det and Sarvam are unrelated additions with different concrete implementations.", "right": "pull_request:44994"}, {"accept": false, "left": "pull_request:40294", "reason": "Both are separate model additions (Higgs Audio V2 vs PP-DocLayoutV3), not the same underlying change.", "right": "pull_request:43098"}, {"accept": false, "left": "pull_request:40294", "reason": "Different model-support PRs for unrelated model families; shared files are standard auto-registration boilerplate.", "right": "pull_request:43247"}, {"accept": false, "left": "pull_request:40294", "reason": "Different concrete model additions (Higgs Audio V2 vs PP-OCRv5_server_det); not mergeable as one fix.", "right": "pull_request:43274"}, {"accept": false, "left": "pull_request:43098", "reason": "PP-DocLayoutV3 
and SAM3-LiteText are distinct model implementations and docs, with no shared bug or feature target.", "right": "pull_request:44308"}, {"accept": false, "left": "pull_request:43247", "reason": "Different model families and code paths; only generic support scaffolding overlaps.", "right": "pull_request:44308"}, {"accept": false, "left": "pull_request:43274", "reason": "PP-OCRv5_server_det and SAM3-LiteText are unrelated model support additions.", "right": "pull_request:44308"}, {"accept": false, "left": "pull_request:43627", "reason": "A scratch notebook addition and a model-support PR are not the same change or bug.", "right": "pull_request:44308"}, {"accept": false, "left": "pull_request:43291", "reason": "Both touch tests/tokenization, but they are unrelated: Whisper test fixes versus Nomic BERT auto-fixing failing tests.", "right": "pull_request:45286"}], "summary": "This cluster is mostly unrelated pull requests: several independent model-support additions, a couple of test/workflow tweaks, and one small compatibility fix. 
None of the soft pairs look like true duplicates; they target different models or different code paths."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43647|pull_request:43884", "pull_request:43647|pull_request:44034", "pull_request:43648|pull_request:43884", "pull_request:43648|pull_request:44034", "pull_request:41061|pull_request:43385", "pull_request:41061|pull_request:43707", "pull_request:43498|pull_request:45139", "pull_request:40294|pull_request:43385", "pull_request:40294|pull_request:43707", "pull_request:43385|pull_request:44308", "pull_request:43707|pull_request:44308", "pull_request:43098|pull_request:44994", "pull_request:43247|pull_request:44994", "pull_request:43274|pull_request:44994", "pull_request:40294|pull_request:43098", "pull_request:40294|pull_request:43247", "pull_request:40294|pull_request:43274", "pull_request:43098|pull_request:44308", "pull_request:43247|pull_request:44308", "pull_request:43274|pull_request:44308", "pull_request:43627|pull_request:44308", "pull_request:43291|pull_request:45286"], "split": true, "trimmed": true} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 16052, "estimated_input_tokens": 7898, "item_count": 18, "node_count": 18, "serialized_chars": 31590, "soft_pair_count": 23}, "cached_at": "2026-04-14T13:40:22Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "a1c85c47d539fafb4372415eb7fb77fd1ff3bf69f39b256e3001af24b2b87bf4", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:36895", "pull_request:39455", "pull_request:42772", "pull_request:42978", "pull_request:43291", "pull_request:43385", "pull_request:43448", "pull_request:43451", 
"pull_request:43665", "pull_request:43858", "pull_request:44539", "pull_request:44815", "pull_request:44994", "pull_request:45286", "pull_request:45287", "pull_request:45288", "pull_request:45403", "pull_request:45410"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "No single PR is a good cluster representative; the overlap is too broad and mostly incidental.", "canonical_issue_reason": null, "canonical_pr_reason": "No canonical PR: the items span distinct models and unrelated test/code changes, with only generic registry/docs overlap.", "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43291", "reason": "Different models and files: Whisper decode logic vs VideomT test adjustments; unrelated fixes.", "right": "pull_request:45287"}, {"accept": false, "left": "pull_request:43291", "reason": "Whisper tokenizer change and Cohere ASR test fix address separate code paths.", "right": "pull_request:45288"}, {"accept": false, "left": "pull_request:43448", "reason": "Both are model additions, but for different models (Molmo vs Molmo2) with distinct implementation changes.", "right": "pull_request:43451"}, {"accept": false, "left": "pull_request:39455", "reason": "EuroBERT and ViT NEPA are separate model-support PRs, not the same change.", "right": "pull_request:42978"}, {"accept": false, "left": "pull_request:42978", "reason": "ViT NEPA and Molmo are unrelated model additions despite shared auto/docs boilerplate.", "right": "pull_request:43448"}, {"accept": false, "left": "pull_request:42978", "reason": "ViT NEPA and Molmo2 are different model integrations; shared registry edits are generic.", "right": "pull_request:43451"}, {"accept": false, "left": "pull_request:36895", "reason": "RF-DETR and EuroBERT are unrelated model additions.", "right": "pull_request:39455"}, {"accept": false, "left": "pull_request:43448", "reason": "Molmo and A.X K1 are different model additions with different code paths.", "right": 
"pull_request:44539"}, {"accept": false, "left": "pull_request:43451", "reason": "Molmo2 and A.X K1 are unrelated model-support PRs.", "right": "pull_request:44539"}, {"accept": false, "left": "pull_request:43448", "reason": "Molmo model addition and Sarvam model addition are distinct changes.", "right": "pull_request:44994"}, {"accept": false, "left": "pull_request:43451", "reason": "Molmo2 and Sarvam are separate model implementations, not duplicates.", "right": "pull_request:44994"}, {"accept": false, "left": "pull_request:42978", "reason": "ViT NEPA model support and Sarvam model support do not fix the same issue.", "right": "pull_request:44994"}, {"accept": false, "left": "pull_request:43448", "reason": "Molmo and GlmMoeDsa are different model additions; overlap is only shared auto/docs plumbing.", "right": "pull_request:43858"}, {"accept": false, "left": "pull_request:43451", "reason": "Molmo2 and GlmMoeDsa are unrelated model-support PRs.", "right": "pull_request:43858"}, {"accept": false, "left": "pull_request:43385", "reason": "UVDoc and Molmo are different model additions; shared auto/docs files are boilerplate.", "right": "pull_request:43448"}, {"accept": false, "left": "pull_request:43385", "reason": "UVDoc and Molmo2 are separate model integrations, not one fix.", "right": "pull_request:43451"}, {"accept": false, "left": "pull_request:43665", "reason": "CLIP/ViT test adjustments and AltCLIP test fixes target different models and failures.", "right": "pull_request:45410"}, {"accept": false, "left": "pull_request:44815", "reason": "Dequant/fp8 loading changes in Mistral4 are unrelated to AltCLIP test fixes.", "right": "pull_request:45410"}, {"accept": false, "left": "pull_request:43665", "reason": "CLIP/ViT test fixes and Nomic BERT test fixes are separate test-only changes.", "right": "pull_request:45286"}, {"accept": false, "left": "pull_request:43665", "reason": "Different test suites for different models; no shared underlying bug.", "right": 
"pull_request:45287"}, {"accept": false, "left": "pull_request:43665", "reason": "CLIP/ViT test changes and Cohere ASR test changes are unrelated.", "right": "pull_request:45288"}, {"accept": false, "left": "pull_request:43665", "reason": "CLIP/ViT test adjustments are unrelated to the ClipSeg common-test fix.", "right": "pull_request:45403"}, {"accept": false, "left": "pull_request:42772", "reason": "Tokenization-auto tweak for a model family is unrelated to CLIP/ViT test fixes.", "right": "pull_request:43665"}], "summary": "This cluster is a mix of unrelated model-addition PRs and separate test/code fixes. The shared files are mostly boilerplate registration/docs edits, not evidence of the same underlying change, so none of the soft edges should be merged."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43291|pull_request:45287", "pull_request:43291|pull_request:45288", "pull_request:43448|pull_request:43451", "pull_request:39455|pull_request:42978", "pull_request:42978|pull_request:43448", "pull_request:42978|pull_request:43451", "pull_request:36895|pull_request:39455", "pull_request:43448|pull_request:44539", "pull_request:43451|pull_request:44539", "pull_request:43448|pull_request:44994", "pull_request:43451|pull_request:44994", "pull_request:42978|pull_request:44994", "pull_request:43448|pull_request:43858", "pull_request:43451|pull_request:43858", "pull_request:43385|pull_request:43448", "pull_request:43385|pull_request:43451", "pull_request:43665|pull_request:45410", "pull_request:44815|pull_request:45410", "pull_request:43665|pull_request:45286", "pull_request:43665|pull_request:45287", "pull_request:43665|pull_request:45288", "pull_request:43665|pull_request:45403", "pull_request:42772|pull_request:43665"], "split": true, "trimmed": true} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 20904, "estimated_input_tokens": 10324, "item_count": 
18, "node_count": 18, "serialized_chars": 41293, "soft_pair_count": 24}, "cached_at": "2026-04-14T13:41:02Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "a3b9056d997961bf242aa82aec06b0fd4ea57c10b9ab374792cf1a373fdaa199", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:42227", "pull_request:42772", "pull_request:43291", "pull_request:43465", "pull_request:43544", "pull_request:43665", "pull_request:43669", "pull_request:43677", "pull_request:43851", "pull_request:43926", "pull_request:44053", "pull_request:44055", "pull_request:44564", "pull_request:44571", "pull_request:44815", "pull_request:44876", "pull_request:45016", "pull_request:45139"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "pull_request:43465 is the strongest standalone candidate: it is merged, explicitly linked to an issue, and fixes a concrete gguf conversion/load-path bug. 
It is still not a duplicate of the other items.", "canonical_issue_reason": null, "canonical_pr_reason": "No single canonical PR: the items span distinct subsystems and unrelated fixes, with no shared files, targets, or concrete code paths tying them together.", "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43665", "reason": "Different problems: CLIP/ViT test edits vs ModernBERT norm_eps dtype fix; no shared code path.", "right": "pull_request:43669"}, {"accept": false, "left": "pull_request:43665", "reason": "Test-only changes vs GLM DSA attention/inference fix; unrelated model paths.", "right": "pull_request:44564"}, {"accept": false, "left": "pull_request:43665", "reason": "Test edits vs Pegasus conversion/dependency changes; not the same bug.", "right": "pull_request:44571"}, {"accept": false, "left": "pull_request:43665", "reason": "Model test changes vs NemotronH modular cleanup; no common underlying issue.", "right": "pull_request:44876"}, {"accept": false, "left": "pull_request:42227", "reason": "Whisper ASR return_language fix vs test breakage injections in CLIP/ViT; different subsystems.", "right": "pull_request:43665"}, {"accept": false, "left": "pull_request:43544", "reason": "Docstring typo cleanup vs model test edits; unrelated.", "right": "pull_request:43665"}, {"accept": false, "left": "pull_request:43665", "reason": "Test changes vs PEFT conversion typo fix; different code paths.", "right": "pull_request:44053"}, {"accept": false, "left": "pull_request:43665", "reason": "Test changes vs unprotected torch import guard; unrelated.", "right": "pull_request:44055"}, {"accept": false, "left": "pull_request:43665", "reason": "Test-only edits vs GLM5 inference bug fix; not the same concrete issue.", "right": "pull_request:45016"}, {"accept": false, "left": "pull_request:43465", "reason": "gguf conversion/load fix vs model test edits; no shared bug or merge path.", "right": "pull_request:43665"}, {"accept": false, 
"left": "pull_request:43665", "reason": "Test edits vs processing_utils video kwargs fix; unrelated.", "right": "pull_request:43677"}, {"accept": false, "left": "pull_request:43665", "reason": "Test edits vs Slack workflow install step; different area and purpose.", "right": "pull_request:43851"}, {"accept": false, "left": "pull_request:43665", "reason": "Test edits vs Deepspeed WeightConverter fix; unrelated code path.", "right": "pull_request:43926"}, {"accept": false, "left": "pull_request:43665", "reason": "Test-only changes vs broad rope/vllm-related updates; not the same bug.", "right": "pull_request:45139"}, {"accept": false, "left": "pull_request:42772", "reason": "Tokenization_auto backend selection change vs fp8 dequant/model-loading work; different subsystems and symptoms.", "right": "pull_request:44815"}, {"accept": false, "left": "pull_request:43669", "reason": "ModernBERT dtype correction vs dequant/fp8 loading fix; unrelated.", "right": "pull_request:44815"}, {"accept": false, "left": "pull_request:44564", "reason": "GLM DSA attention fix vs dequant/fp8 fix; different concrete code paths.", "right": "pull_request:44815"}, {"accept": false, "left": "pull_request:44571", "reason": "Pegasus conversion/dependency fix vs fp8 dequant fix; unrelated.", "right": "pull_request:44815"}, {"accept": false, "left": "pull_request:44815", "reason": "FP8 dequant/model-loading changes vs NemotronH modular model cleanup; not the same issue.", "right": "pull_request:44876"}, {"accept": false, "left": "pull_request:42227", "reason": "Whisper return_language bug vs fp8 dequant fix; unrelated subsystems.", "right": "pull_request:44815"}, {"accept": false, "left": "pull_request:43544", "reason": "Docstring typo fix vs dequant/model-loading bug fix; no overlap.", "right": "pull_request:44815"}, {"accept": false, "left": "pull_request:44055", "reason": "Torch import protection vs fp8 dequant fix; different concerns.", "right": "pull_request:44815"}, {"accept": false, "left": 
"pull_request:44815", "reason": "FP8 dequant/model-loading work vs GLM5 inference bug fix; not mergeable as one fix.", "right": "pull_request:45016"}, {"accept": false, "left": "pull_request:43291", "reason": "Whisper tokenizer test/decode refactor vs fp8 dequant fix; unrelated and not the same bug.", "right": "pull_request:44815"}], "summary": "The cluster is heterogeneous: it mixes Whisper pipeline/tokenizer fixes, model-loading and quantization changes, workflow/docstring/test-only edits, and several unrelated model-specific bug fixes. None of the soft-edge pairs look like the same underlying bug or mergeable into one PR."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43665|pull_request:43669", "pull_request:43665|pull_request:44564", "pull_request:43665|pull_request:44571", "pull_request:43665|pull_request:44876", "pull_request:42227|pull_request:43665", "pull_request:43544|pull_request:43665", "pull_request:43665|pull_request:44053", "pull_request:43665|pull_request:44055", "pull_request:43665|pull_request:45016", "pull_request:43465|pull_request:43665", "pull_request:43665|pull_request:43677", "pull_request:43665|pull_request:43851", "pull_request:43665|pull_request:43926", "pull_request:43665|pull_request:45139", "pull_request:42772|pull_request:44815", "pull_request:43669|pull_request:44815", "pull_request:44564|pull_request:44815", "pull_request:44571|pull_request:44815", "pull_request:44815|pull_request:44876", "pull_request:42227|pull_request:44815", "pull_request:43544|pull_request:44815", "pull_request:44055|pull_request:44815", "pull_request:44815|pull_request:45016", "pull_request:43291|pull_request:44815"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 20284, "estimated_input_tokens": 10014, "item_count": 17, "node_count": 17, "serialized_chars": 40054, "soft_pair_count": 22}, "cached_at": 
"2026-04-14T13:41:40Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "a47b28100567cb4da2444f7019284309a21ac58667978ccee041908a18e42aa1", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:42772", "pull_request:43291", "pull_request:43453", "pull_request:43669", "pull_request:44343", "pull_request:44352", "pull_request:44364", "pull_request:44388", "pull_request:44398", "pull_request:44429", "pull_request:44456", "pull_request:44470", "pull_request:44515", "pull_request:44519", "pull_request:44564", "pull_request:44690", "pull_request:44876"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 44564 is the best overall PR representative in this set because it is merged, narrowly fixes a specific code path, and matches the final ReLU behavior rather than just a comment or test-only adjustment.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 44564 is the strongest canonical PR: it targets the same concrete GLM-MOE-DSA scoring bug, touches both implementation paths, has the explicit issue target, and is the merged fix version.", "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:42772", "reason": "Different bugs in different subsystems: tokenization backend selection vs Whisper decode/test changes.", "right": "pull_request:43291"}, {"accept": false, "left": "pull_request:42772", "reason": "Unrelated areas: auto tokenization backend mapping vs ModernBERT config dtype fix.", "right": "pull_request:43669"}, {"accept": false, "left": "pull_request:42772", "reason": "No shared code path or bug; tokenization backend selection is unrelated to GLM-MOE-DSA scoring.", "right": "pull_request:44564"}, {"accept": false, "left": "pull_request:42772", 
"reason": "Completely different model code paths and fixes; only a superficial PR similarity.", "right": "pull_request:44876"}, {"accept": false, "left": "pull_request:43291", "reason": "Whisper test/decode cleanup is unrelated to ModernBERT config typing.", "right": "pull_request:43669"}, {"accept": false, "left": "pull_request:43291", "reason": "Different model and different behavior; Whisper tests do not match GLM-MOE-DSA scoring logic.", "right": "pull_request:44564"}, {"accept": false, "left": "pull_request:43291", "reason": "Whisper test fix and NemotronH modular cleanup are unrelated changes.", "right": "pull_request:44876"}, {"accept": false, "left": "pull_request:43669", "reason": "ModernBERT norm_eps dtype fix is unrelated to GLM-MOE-DSA ReLU scoring.", "right": "pull_request:44564"}, {"accept": false, "left": "pull_request:43669", "reason": "Different model families and different bug classes.", "right": "pull_request:44876"}, {"accept": true, "left": "pull_request:44364", "reason": "Same issue target and same concrete fix: add ReLU in GLM-MOE-DSA indexer scoring across the two implementation files.", "right": "pull_request:44690"}, {"accept": true, "left": "pull_request:44398", "reason": "Same underlying GLM-MOE-DSA scoring bug; both patches insert the missing ReLU in the same code path.", "right": "pull_request:44690"}, {"accept": true, "left": "pull_request:44352", "reason": "Same loading_report ANSI/non-TTY bug; both change the same helper logic to suppress terminal styling when stdout is not a TTY.", "right": "pull_request:44388"}, {"accept": true, "left": "pull_request:44343", "reason": "Same loading_report ANSI escape fix, same file and same failure mode.", "right": "pull_request:44352"}, {"accept": true, "left": "pull_request:44343", "reason": "Same non-TTY ANSI styling bug in loading_report; the helper refactor still targets the same behavior.", "right": "pull_request:44429"}, {"accept": true, "left": "pull_request:44352", "reason": "Same 
loading_report fix, just a slightly different implementation of the same terminal-style suppression.", "right": "pull_request:44429"}, {"accept": true, "left": "pull_request:44364", "reason": "Same GLM-MOE-DSA ReLU omission in the scoring path; only the exact expression form differs.", "right": "pull_request:44398"}, {"accept": true, "left": "pull_request:44364", "reason": "Same missing-ReLU bug in the same indexer scoring logic.", "right": "pull_request:44470"}, {"accept": true, "left": "pull_request:44398", "reason": "Same concrete GLM-MOE-DSA scoring fix, with identical intent and code-path coverage.", "right": "pull_request:44470"}, {"accept": false, "left": "pull_request:43453", "reason": "Both touch tests/configuration, but they fix different model-specific failures and are not the same underlying bug.", "right": "pull_request:44519"}, {"accept": false, "left": "pull_request:43453", "reason": "Pad-token-id config propagation across several models is unrelated to a DepthPro integration-test dtype adjustment.", "right": "pull_request:44456"}, {"accept": false, "left": "pull_request:43453", "reason": "Different models and different failure modes; one is a config attribute fix, the other is a GPTNeo generation test tweak.", "right": "pull_request:44515"}], "summary": "There are two real duplicate clusters here: the loading_report ANSI/non-TTY fixes, and the GLM-MOE-DSA indexer ReLU fixes. 
The remaining PRs are unrelated single-purpose changes or test/config updates and should not be merged across clusters."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:42772|pull_request:43291", "pull_request:42772|pull_request:43669", "pull_request:42772|pull_request:44564", "pull_request:42772|pull_request:44876", "pull_request:43291|pull_request:43669", "pull_request:43291|pull_request:44564", "pull_request:43291|pull_request:44876", "pull_request:43669|pull_request:44564", "pull_request:43669|pull_request:44876", "pull_request:44364|pull_request:44690", "pull_request:44398|pull_request:44690", "pull_request:44352|pull_request:44388", "pull_request:44343|pull_request:44352", "pull_request:44343|pull_request:44429", "pull_request:44352|pull_request:44429", "pull_request:44364|pull_request:44398", "pull_request:44343|pull_request:44388", "pull_request:44364|pull_request:44470", "pull_request:44398|pull_request:44470", "pull_request:43453|pull_request:44519", "pull_request:43453|pull_request:44456", "pull_request:43453|pull_request:44515"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 11186, "estimated_input_tokens": 5465, "item_count": 8, "node_count": 8, "serialized_chars": 21858, "soft_pair_count": 24}, "cached_at": "2026-04-14T13:42:09Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "26f7c2b6a8e06c89e815792f6f1142c0adb2db7493cb83ad40357f9cbdfb008d", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44191", "pull_request:44192", "pull_request:44193", "pull_request:44194", "pull_request:44195", "pull_request:44196", "pull_request:44197", 
"pull_request:44198"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "No single PR is a good global representative because there is no shared underlying bug/change across the cluster; picking any one would be arbitrary and misleading.", "canonical_issue_reason": null, "canonical_pr_reason": "No canonical PR: the items address distinct problems in AudioFlamingo inference, ModelOutput assignment, auto video processor loading, optimizer arg coercion, AddedToken construction, config generation params, and tokenizer conversion. Similar titles and nearby dates are not enough to make them one change.", "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44196", "reason": "Both are config-related, but one fixes deprecated generation params on PretrainedConfig loading while the other changes GenerationConfig save-time validation. Different code paths and different bugs.", "right": "pull_request:44197"}, {"accept": false, "left": "pull_request:44196", "reason": "Unrelated areas: config loading for generation params vs tokenizer file conversion/fast tokenizer post-processing. 
No shared underlying fix.", "right": "pull_request:44198"}, {"accept": false, "left": "pull_request:44192", "reason": "ModelOutput __setattr__ assignment behavior is unrelated to config generation-parameter backward compatibility.", "right": "pull_request:44196"}, {"accept": false, "left": "pull_request:44192", "reason": "ModelOutput dataclass assignment bug is unrelated to tokenizer conversion logic.", "right": "pull_request:44198"}, {"accept": false, "left": "pull_request:44195", "reason": "AddedToken kwargs handling and GenerationConfig strict-save validation are different bugs in different modules.", "right": "pull_request:44197"}, {"accept": false, "left": "pull_request:44194", "reason": "SGD optimizer arg coercion has nothing to do with tokenizer conversion or detokenization.", "right": "pull_request:44198"}, {"accept": false, "left": "pull_request:44192", "reason": "ModelOutput attribute synchronization and optimizer argument handling are unrelated changes.", "right": "pull_request:44194"}, {"accept": false, "left": "pull_request:44194", "reason": "SGD optimizer kwargs parsing is not the same issue as config generation-parameter loading.", "right": "pull_request:44196"}, {"accept": false, "left": "pull_request:44193", "reason": "Auto video processor None-guard and config generation param handling are separate code paths and bugs.", "right": "pull_request:44196"}, {"accept": false, "left": "pull_request:44193", "reason": "Video processor lookup crash and AddedToken construction TypeError are unrelated.", "right": "pull_request:44195"}, {"accept": false, "left": "pull_request:44197", "reason": "GenerationConfig save validation and tokenizer JSON conversion are different changes with no common failure mode.", "right": "pull_request:44198"}, {"accept": false, "left": "pull_request:44195", "reason": "AddedToken special-flag handling is unrelated to tokenizer fast-file conversion logic.", "right": "pull_request:44198"}, {"accept": false, "left": 
"pull_request:44191", "reason": "AudioFlamingo batched inference fix is unrelated to SGD optimizer argument coercion.", "right": "pull_request:44194"}, {"accept": false, "left": "pull_request:44193", "reason": "Auto video processor crash fix and GenerationConfig strict-save validation are unrelated bugs.", "right": "pull_request:44197"}, {"accept": false, "left": "pull_request:44194", "reason": "Optimizer args parsing and AddedToken construction are unrelated subsystems.", "right": "pull_request:44195"}, {"accept": false, "left": "pull_request:44191", "reason": "AudioFlamingo batched inference and GenerationConfig invalid-save handling are different concrete bugs.", "right": "pull_request:44197"}, {"accept": false, "left": "pull_request:44191", "reason": "AudioFlamingo inference bug and config generation-param compatibility fix do not share the same code path.", "right": "pull_request:44196"}, {"accept": false, "left": "pull_request:44195", "reason": "AddedToken duplicate-argument fix is unrelated to configuration generation-parameter loading.", "right": "pull_request:44196"}, {"accept": false, "left": "pull_request:44194", "reason": "Optimizer SGD kwargs handling is unrelated to GenerationConfig save validation.", "right": "pull_request:44197"}, {"accept": false, "left": "pull_request:44191", "reason": "AudioFlamingo batched inference and AddedToken construction bug are unrelated.", "right": "pull_request:44195"}, {"accept": false, "left": "pull_request:44192", "reason": "ModelOutput key assignment and AddedToken kwargs handling are unrelated bugs.", "right": "pull_request:44195"}, {"accept": false, "left": "pull_request:44191", "reason": "AudioFlamingo batched inference and ModelOutput assignment bugs are unrelated.", "right": "pull_request:44192"}, {"accept": false, "left": "pull_request:44193", "reason": "Auto video processor None-check and optimizer arg coercion are unrelated.", "right": "pull_request:44194"}, {"accept": false, "left": "pull_request:44192", 
"reason": "ModelOutput setattr behavior and auto video processor loading crash are unrelated.", "right": "pull_request:44193"}], "summary": "This cluster is heterogeneous: the PRs touch unrelated subsystems and fix different bugs, so none should be merged as duplicates. All soft edges are similarity-only matches and should be rejected."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44196|pull_request:44197", "pull_request:44196|pull_request:44198", "pull_request:44192|pull_request:44196", "pull_request:44192|pull_request:44198", "pull_request:44195|pull_request:44197", "pull_request:44194|pull_request:44198", "pull_request:44192|pull_request:44194", "pull_request:44194|pull_request:44196", "pull_request:44193|pull_request:44196", "pull_request:44193|pull_request:44195", "pull_request:44197|pull_request:44198", "pull_request:44195|pull_request:44198", "pull_request:44191|pull_request:44194", "pull_request:44193|pull_request:44197", "pull_request:44194|pull_request:44195", "pull_request:44191|pull_request:44197", "pull_request:44191|pull_request:44196", "pull_request:44195|pull_request:44196", "pull_request:44194|pull_request:44197", "pull_request:44191|pull_request:44195", "pull_request:44192|pull_request:44195", "pull_request:44191|pull_request:44192", "pull_request:44193|pull_request:44194", "pull_request:44192|pull_request:44193"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 17522, "estimated_input_tokens": 8633, "item_count": 15, "node_count": 15, "serialized_chars": 34531, "soft_pair_count": 24}, "cached_at": "2026-04-14T13:42:39Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", 
"prepared_review_unit_hash": "c9fccd3d8e66bb4969012ba0e774939f2130109e9ab65cfc3e3cb51fd5281e29", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43998", "pull_request:44000", "pull_request:44001", "pull_request:44002", "pull_request:44004", "pull_request:44025", "pull_request:44026", "pull_request:44027", "pull_request:44028", "pull_request:44029", "pull_request:44030", "pull_request:44456", "pull_request:44515", "pull_request:44519", "pull_request:44934"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 44025 is the best single representative of the main refactor subgroup: it has a broader, coherent scope than most others and clearly matches the repeated `can_return_tuple`/`auto_docstring` output-tracing pattern. The other PRs are separate model-specific refactors or unrelated integration-test fixes.", "canonical_issue_reason": null, "canonical_pr_reason": "No single PR represents the whole cluster. If one representative is needed, PR 44025 is the strongest candidate because it is a concrete output-tracing refactor spanning two closely related Depth Anything files and introduces the new `can_return_tuple` decorator in the expected pattern.", "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44456", "reason": "Both are test-fix PRs, but they target different failures and different files (`DepthProModelIntegrationTest` vs `Qwen2`/`T5` integration tests). The changes are unrelated and would not be merged as one PR.", "right": "pull_request:44934"}, {"accept": false, "left": "pull_request:44515", "reason": "Different failing tests in different models (`GPTNeo` generation vs `Qwen2`/`T5` integration tests). Same broad category, but not the same bug or change.", "right": "pull_request:44934"}, {"accept": false, "left": "pull_request:44519", "reason": "`MarianIntegrationTests` updates generation config assertions, while 44934 changes expected generation/test data for other models. 
No shared concrete code path.", "right": "pull_request:44934"}, {"accept": false, "left": "pull_request:43998", "reason": "Both are output-tracing refactors, but they touch different model implementations (`timm_backbone` vs `upernet`) and different forward paths. Same pattern, not the same change.", "right": "pull_request:44002"}, {"accept": false, "left": "pull_request:43998", "reason": "Different model code paths (`timm_backbone` vs `vision_text_dual_encoder`) with separate forward signatures; not a single fix.", "right": "pull_request:44000"}, {"accept": false, "left": "pull_request:44026", "reason": "`vision_encoder_decoder` and `rwkv` are unrelated model families; these are separate refactors, not one underlying bug.", "right": "pull_request:44029"}, {"accept": false, "left": "pull_request:43998", "reason": "`timm_backbone` and `univnet` are separate output-tracing cleanups in different files with no shared concrete failure.", "right": "pull_request:44001"}, {"accept": false, "left": "pull_request:44029", "reason": "`rwkv` changes its layer/forward tracing and `dpr` changes encoder wrapper behavior; they do not fix the same code-path problem.", "right": "pull_request:44030"}, {"accept": false, "left": "pull_request:44025", "reason": "`depth_anything`/`prompt_depth_anything` vs `vision_encoder_decoder` are different implementations; same refactor theme, not the same PR.", "right": "pull_request:44026"}, {"accept": false, "left": "pull_request:44025", "reason": "`depth_anything` and `speech_encoder_decoder` are unrelated model paths; no evidence of a shared bug or patch.", "right": "pull_request:44027"}, {"accept": false, "left": "pull_request:44025", "reason": "`depth_anything` and `superpoint` are separate model stacks with different forward contracts; not mergeable as one fix.", "right": "pull_request:44028"}, {"accept": false, "left": "pull_request:44026", "reason": "Both are encoder-decoder wrappers, but they are separate model implementations (`vision` vs 
`speech`) and distinct code changes.", "right": "pull_request:44027"}, {"accept": false, "left": "pull_request:44026", "reason": "`vision_encoder_decoder` and `superpoint` do not share a concrete code path; the similarity is only in the refactor style.", "right": "pull_request:44028"}, {"accept": false, "left": "pull_request:44027", "reason": "`speech_encoder_decoder` and `superpoint` are unrelated changes in different files and model families.", "right": "pull_request:44028"}, {"accept": false, "left": "pull_request:44001", "reason": "`univnet` and `upernet` are different model implementations; these are independent output-tracing refactors.", "right": "pull_request:44002"}, {"accept": false, "left": "pull_request:44000", "reason": "Different model families (`vision_text_dual_encoder` vs `univnet`) and different forward signatures; no shared underlying bug.", "right": "pull_request:44001"}, {"accept": false, "left": "pull_request:44000", "reason": "`vision_text_dual_encoder` and `upernet` are unrelated code paths, so this is not one concrete fix.", "right": "pull_request:44002"}, {"accept": false, "left": "pull_request:43998", "reason": "`timm_backbone` and `rwkv` are unrelated model implementations; the shared phrasing does not imply a single bug.", "right": "pull_request:44029"}, {"accept": false, "left": "pull_request:44002", "reason": "`upernet` and `rwkv` touch different forward paths and different files; not the same underlying issue.", "right": "pull_request:44029"}, {"accept": false, "left": "pull_request:43998", "reason": "`timm_backbone` and `codegen` are separate output-tracing changes in different models, not a shared fix.", "right": "pull_request:44004"}, {"accept": false, "left": "pull_request:44001", "reason": "`univnet` and `codegen` are unrelated changes; they do not appear mergeable into one PR.", "right": "pull_request:44004"}, {"accept": false, "left": "pull_request:44002", "reason": "`upernet` and `codegen` target different code paths and 
different behavior, so they are not duplicates.", "right": "pull_request:44004"}, {"accept": false, "left": "pull_request:44025", "reason": "`depth_anything` vs `rwkv` are different model families and different implementation details; same refactor motif only.", "right": "pull_request:44029"}, {"accept": false, "left": "pull_request:44025", "reason": "`depth_anything` and `dpr` are unrelated model code paths, so they are not the same concrete change.", "right": "pull_request:44030"}], "summary": "This cluster is a mix of unrelated PRs: several independent output-tracing refactors across different model implementations, plus separate test-fix PRs. They share a naming pattern but not the same concrete bug or change, so the soft links should be rejected."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44456|pull_request:44934", "pull_request:44515|pull_request:44934", "pull_request:44519|pull_request:44934", "pull_request:43998|pull_request:44002", "pull_request:43998|pull_request:44000", "pull_request:44026|pull_request:44029", "pull_request:43998|pull_request:44001", "pull_request:44029|pull_request:44030", "pull_request:44025|pull_request:44026", "pull_request:44025|pull_request:44027", "pull_request:44025|pull_request:44028", "pull_request:44026|pull_request:44027", "pull_request:44026|pull_request:44028", "pull_request:44027|pull_request:44028", "pull_request:44001|pull_request:44002", "pull_request:44000|pull_request:44001", "pull_request:44000|pull_request:44002", "pull_request:43998|pull_request:44029", "pull_request:44002|pull_request:44029", "pull_request:43998|pull_request:44004", "pull_request:44001|pull_request:44004", "pull_request:44002|pull_request:44004", "pull_request:44025|pull_request:44029", "pull_request:44025|pull_request:44030"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 20050, 
"estimated_input_tokens": 9897, "item_count": 18, "node_count": 18, "serialized_chars": 39586, "soft_pair_count": 22}, "cached_at": "2026-04-14T13:43:17Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "689e631471a9cd7f8c80b45668e8e53caa1526c2b13f06a3e1a7782a423470ef", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43324", "pull_request:43339", "pull_request:44000", "pull_request:44001", "pull_request:44002", "pull_request:44025", "pull_request:44026", "pull_request:44027", "pull_request:44028", "pull_request:44029", "pull_request:44030", "pull_request:44439", "pull_request:44456", "pull_request:44515", "pull_request:44519", "pull_request:44934", "pull_request:45048", "pull_request:45214"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "45214 is the most substantial and reusable representative: it fixes an actual forward-path/device issue in cohere_asr and verifies it with a targeted model-parallel test. The other PRs are mostly per-model expectation tweaks or refactors.", "canonical_issue_reason": null, "canonical_pr_reason": "No single true canonical duplicate in this mixed set. 
If one PR has to represent the cluster, 45214 is the strongest standalone fix because it changes production code and adds matching test coverage, unlike the many expectation-only updates.", "confidence": 0.88, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44026", "reason": "Different models and code paths (vision_encoder_decoder vs DPR); same refactor theme, but not the same underlying bug/change.", "right": "pull_request:44030"}, {"accept": false, "left": "pull_request:44027", "reason": "Different models (speech_encoder_decoder vs RWKV) and different implementations; only the tracing-refactor pattern matches.", "right": "pull_request:44029"}, {"accept": false, "left": "pull_request:44027", "reason": "Model-specific refactors in different code paths; not one concrete change that could be merged as a single PR.", "right": "pull_request:44030"}, {"accept": false, "left": "pull_request:44028", "reason": "SuperPoint output handling vs RWKV output tracing are unrelated fixes despite similar wording.", "right": "pull_request:44029"}, {"accept": false, "left": "pull_request:44028", "reason": "Different model implementations and different forward signatures; not a duplicate fix.", "right": "pull_request:44030"}, {"accept": false, "left": "pull_request:44456", "reason": "DepthPro inference test adjustment vs GPTNeo generation test adjustment; unrelated failures and fixes.", "right": "pull_request:44515"}, {"accept": false, "left": "pull_request:44456", "reason": "Different integration tests for different models; no shared concrete bug.", "right": "pull_request:44519"}, {"accept": false, "left": "pull_request:44515", "reason": "GPTNeo generation test vs Marian integration test; separate expectations and separate models.", "right": "pull_request:44519"}, {"accept": false, "left": "pull_request:44439", "reason": "ProphetNet summarization test fix and DepthPro depth-estimation test fix are unrelated.", "right": "pull_request:44456"}, {"accept": false, "left": 
"pull_request:44439", "reason": "Different models and different failing tests; no common code-path problem.", "right": "pull_request:44515"}, {"accept": false, "left": "pull_request:44439", "reason": "ProphetNet integration test and Marian integration test are distinct fixes, not duplicates.", "right": "pull_request:44519"}, {"accept": false, "left": "pull_request:44934", "reason": "Qwen2/T5 expectation updates vs SmolLM3 generation test changes; unrelated model-specific fixes.", "right": "pull_request:45048"}, {"accept": false, "left": "pull_request:44001", "reason": "Both are output-tracing refactors, but for different models and forward paths; not the same underlying bug/change.", "right": "pull_request:44025"}, {"accept": false, "left": "pull_request:44001", "reason": "UnivNet vs vision_encoder_decoder are separate model refactors with no shared concrete fix.", "right": "pull_request:44026"}, {"accept": false, "left": "pull_request:44002", "reason": "UperNet and DepthAnything are different model code paths; similar refactor pattern only.", "right": "pull_request:44025"}, {"accept": false, "left": "pull_request:44000", "reason": "VisionTextDualEncoder and DepthAnything are unrelated model implementations; not one duplicate change.", "right": "pull_request:44025"}, {"accept": false, "left": "pull_request:44000", "reason": "Different models and files; the shared theme is only a broad tracing refactor.", "right": "pull_request:44026"}, {"accept": false, "left": "pull_request:44456", "reason": "DepthPro inference test fix vs SmolLM3 generation expectation change; unrelated.", "right": "pull_request:45048"}, {"accept": false, "left": "pull_request:44515", "reason": "GPTNeo generation test fix and SmolLM3 generation fix are separate model-specific test updates.", "right": "pull_request:45048"}, {"accept": false, "left": "pull_request:44519", "reason": "Marian integration test changes vs SmolLM3 generation changes; no shared concrete bug.", "right": 
"pull_request:45048"}, {"accept": false, "left": "pull_request:43324", "reason": "Minimax_m2 XPU expectation tweak and lw_detr XPU expectation tweak are different models and independent test baselines.", "right": "pull_request:43339"}, {"accept": false, "left": "pull_request:43324", "reason": "XPU test expectation update for Minimax_m2 versus a real cohere_asr device-placement bug fix; unrelated artifacts.", "right": "pull_request:45214"}], "summary": "This cluster is mostly a bundle of separate, model-specific PRs: several per-model output-tracing refactors, several unrelated integration-test expectation updates, and one concrete cohere_asr bug fix. The soft edges are false positives from broad textual similarity rather than true duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44026|pull_request:44030", "pull_request:44027|pull_request:44029", "pull_request:44027|pull_request:44030", "pull_request:44028|pull_request:44029", "pull_request:44028|pull_request:44030", "pull_request:44456|pull_request:44515", "pull_request:44456|pull_request:44519", "pull_request:44515|pull_request:44519", "pull_request:44439|pull_request:44456", "pull_request:44439|pull_request:44515", "pull_request:44439|pull_request:44519", "pull_request:44934|pull_request:45048", "pull_request:44001|pull_request:44025", "pull_request:44001|pull_request:44026", "pull_request:44002|pull_request:44025", "pull_request:44000|pull_request:44025", "pull_request:44000|pull_request:44026", "pull_request:44456|pull_request:45048", "pull_request:44515|pull_request:45048", "pull_request:44519|pull_request:45048", "pull_request:43324|pull_request:43339", "pull_request:43324|pull_request:45214"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 17402, "estimated_input_tokens": 8573, "item_count": 18, "node_count": 18, "serialized_chars": 34292, 
"soft_pair_count": 14}, "cached_at": "2026-04-14T13:43:39Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "b90609554fe74ae22e1e9314d3b341a3d06dcdf2cf5de00a7c8a80e68998faca", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43324", "pull_request:43339", "pull_request:43464", "pull_request:43488", "pull_request:43500", "pull_request:43563", "pull_request:43615", "pull_request:43938", "pull_request:44179", "pull_request:44235", "pull_request:44456", "pull_request:44490", "pull_request:44515", "pull_request:44519", "pull_request:44527", "pull_request:44566", "pull_request:45190", "pull_request:45214"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "No PR is a good cluster-wide representative because the cluster is not a coherent duplicate group; the closest-to-central items are still unrelated to the others.", "canonical_issue_reason": null, "canonical_pr_reason": "No single PR represents the cluster well: the items span unrelated models and subsystems, so there is no meaningful canonical PR for a duplicate set.", "confidence": 0.98, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44179", "reason": "Both touch tokenization_auto.py, but one adds ministral support while the other adds fuyu to the incorrect-tokenizer-class override list; different models and different fixes.", "right": "pull_request:44235"}, {"accept": false, "left": "pull_request:44566", "reason": "Both are typing-related CLI refactors, but 44566 adds new typing protocols and broader annotations while 45190 changes casts across several files; related area, not the same fix.", "right": "pull_request:45190"}, {"accept": false, "left": "pull_request:43324", "reason": "Both are test expectation updates, but 
for different models (minimax_m2 vs exaone_moe) and different outputs; not the same bug.", "right": "pull_request:43938"}, {"accept": false, "left": "pull_request:43339", "reason": "Different models and different code paths: lw_detr test expectations vs cohere_asr model-parallel forward fix.", "right": "pull_request:45214"}, {"accept": false, "left": "pull_request:43488", "reason": "Both are intentional bot/style check PRs in the ViT test file, but they are separate synthetic edits with different content and purpose.", "right": "pull_request:43563"}, {"accept": false, "left": "pull_request:43324", "reason": "Different model test updates for XPU outputs; they do not fix the same underlying issue.", "right": "pull_request:43615"}, {"accept": false, "left": "pull_request:43488", "reason": "Both are unrelated dummy bot-check PRs against the same file, but they are distinct synthetic changes rather than one shared fix.", "right": "pull_request:43500"}, {"accept": false, "left": "pull_request:43938", "reason": "Different model families and different fixes; exaone_moe test expectations are unrelated to cohere_asr model-parallel device placement.", "right": "pull_request:45214"}, {"accept": false, "left": "pull_request:44519", "reason": "Both adjust failing integration tests, but for different models (Marian vs Musicgen) and different causes; not mergeable as one PR.", "right": "pull_request:44527"}, {"accept": false, "left": "pull_request:43464", "reason": "MarkupLM integration test dtype fix is unrelated to MusicgenStereo's dtype fix; different model and failure mode.", "right": "pull_request:44527"}, {"accept": false, "left": "pull_request:44456", "reason": "DepthPro integration test dtype change and MusicgenStereo dtype change are separate model-specific fixes.", "right": "pull_request:44527"}, {"accept": false, "left": "pull_request:44515", "reason": "GPTNeo generation length fix is unrelated to MusicgenStereo float32 loading; different test and underlying issue.", 
"right": "pull_request:44527"}, {"accept": false, "left": "pull_request:44490", "reason": "Both address device/parallelism concerns, but in different models and different tensor placements; too broad to treat as one duplicate fix.", "right": "pull_request:45214"}, {"accept": false, "left": "pull_request:43324", "reason": "Minimax XPU test expectations and EuroBERT model-parallel pooling fix are unrelated model-specific issues.", "right": "pull_request:44490"}], "summary": "This cluster is a heterogeneous mix of unrelated PRs: model-specific test expectation updates, one-off integration test fixes, tokenizer mapping changes, typing cleanup, and a few intentionally noisy bot-check PRs. None of the soft edges look like the same underlying change, so no duplicate merge is justified."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44179|pull_request:44235", "pull_request:44566|pull_request:45190", "pull_request:43324|pull_request:43938", "pull_request:43339|pull_request:45214", "pull_request:43488|pull_request:43563", "pull_request:43324|pull_request:43615", "pull_request:43488|pull_request:43500", "pull_request:43938|pull_request:45214", "pull_request:44519|pull_request:44527", "pull_request:43464|pull_request:44527", "pull_request:44456|pull_request:44527", "pull_request:44515|pull_request:44527", "pull_request:44490|pull_request:45214", "pull_request:43324|pull_request:44490"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 22856, "estimated_input_tokens": 11300, "item_count": 18, "node_count": 18, "serialized_chars": 45199, "soft_pair_count": 15}, "cached_at": "2026-04-14T13:44:08Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": 
"gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "b9c55877f77b7902af91e2be1d8cede3b528faaa0104d830a9d7fa0aa1b478cd", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43339", "pull_request:43500", "pull_request:43563", "pull_request:43879", "pull_request:43902", "pull_request:43913", "pull_request:43938", "pull_request:44037", "pull_request:44321", "pull_request:44330", "pull_request:44353", "pull_request:44428", "pull_request:44482", "pull_request:44490", "pull_request:44602", "pull_request:44657", "pull_request:44733", "pull_request:44828"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 44330 is the best single representative because it is substantive, merged, and central to a real cross-model refactor rather than a test-only or doc-only tweak.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 44330 is the strongest representative of a substantial, coherent code change: it starts the broad cache_position removal across multiple models and has clear code-path impact. 
The other accepted PRs are either follow-up expansions of the same refactor or narrower doc updates.", "confidence": 0.77, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:43913", "reason": "Same qwen3_vl_moe weight-mapping/conversion problem in the same files; 44037 looks like a refined follow-up to the same concrete fix.", "right": "pull_request:44037"}, {"accept": false, "left": "pull_request:43500", "reason": "Both touch the same ViT test file, but the diffs are unrelated bot/style noise and do not fix the same bug or change.", "right": "pull_request:43563"}, {"accept": false, "left": "pull_request:43938", "reason": "Different models and different problems: exaone_moe test expectations vs eurobert parallelism masking fix.", "right": "pull_request:44490"}, {"accept": false, "left": "pull_request:43339", "reason": "Different model families and different change types: lw_detr XPU expectations vs voxtral_realtime skipped invalid tests.", "right": "pull_request:44321"}, {"accept": false, "left": "pull_request:44428", "reason": "Both are XPU expectation updates, but for unrelated models and unrelated failing behaviors.", "right": "pull_request:44482"}, {"accept": false, "left": "pull_request:44482", "reason": "Unrelated models and fixes; qwen2/jamba flash-attn bug work is not the same underlying change as higgs_audio_v2 expectations.", "right": "pull_request:44733"}, {"accept": false, "left": "pull_request:43339", "reason": "Same broad test-expectation theme, but different models and no shared bug/change path.", "right": "pull_request:43938"}, {"accept": false, "left": "pull_request:44428", "reason": "Different models and different underlying fixes; only superficial similarity in touching tests/expectations.", "right": "pull_request:44733"}, {"accept": true, "left": "pull_request:44330", "reason": "Both are iterations of the same repo-wide effort to remove cache_position from more models; the second extends the same refactor.", "right": 
"pull_request:44602"}, {"accept": false, "left": "pull_request:44353", "reason": "Different models and unrelated expectation updates; no common concrete bug.", "right": "pull_request:44482"}, {"accept": false, "left": "pull_request:44353", "reason": "Different model-specific expectation refreshes with no shared code path or fix.", "right": "pull_request:44428"}, {"accept": false, "left": "pull_request:44353", "reason": "Different scope and different models; not the same underlying change.", "right": "pull_request:44733"}, {"accept": false, "left": "pull_request:44490", "reason": "Eurobert model-parallelism fix and Electra test BF16 mismatch are unrelated.", "right": "pull_request:44657"}, {"accept": true, "left": "pull_request:43879", "reason": "Both are the same stale installation/docs tech-stack update across different language/document sets; they look like successive coverage of the same documentation correction.", "right": "pull_request:43902"}, {"accept": true, "left": "pull_request:44330", "reason": "Same cache_position-removal refactor series, with 44828 continuing the same concrete change to additional models.", "right": "pull_request:44828"}], "summary": "The set breaks into a few distinct PR tracks: qwen3_vl_moe conversion mapping, docs stack/version updates, cache_position removals, and several unrelated test-expected-value or skip-test fixes. 
Most pairs are not duplicates; a few look like successive PRs on the same concrete change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43913|pull_request:44037", "pull_request:43500|pull_request:43563", "pull_request:43938|pull_request:44490", "pull_request:43339|pull_request:44321", "pull_request:44428|pull_request:44482", "pull_request:44482|pull_request:44733", "pull_request:43339|pull_request:43938", "pull_request:44428|pull_request:44733", "pull_request:44330|pull_request:44602", "pull_request:44353|pull_request:44482", "pull_request:44353|pull_request:44428", "pull_request:44353|pull_request:44733", "pull_request:44490|pull_request:44657", "pull_request:43879|pull_request:43902", "pull_request:44330|pull_request:44828"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 20670, "estimated_input_tokens": 10207, "item_count": 18, "node_count": 18, "serialized_chars": 40827, "soft_pair_count": 11}, "cached_at": "2026-04-14T13:44:26Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "cfad5945b2dab2d0fe4729db9d443e6d25a124f4f80fe9560ada60497d2e2f1a", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:42230", "pull_request:43488", "pull_request:43740", "pull_request:43787", "pull_request:43793", "pull_request:43884", "pull_request:43907", "pull_request:43910", "pull_request:43936", "pull_request:43938", "pull_request:44034", "pull_request:44353", "pull_request:44482", "pull_request:44657", "pull_request:44808", "pull_request:45204", "pull_request:45209", "pull_request:45212"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 44808 is 
the strongest cluster representative because it contains the same feature area as 43793 but in a more complete form and with the shared files/fix scope already consolidated.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 44808 is the broader, later implementation and already includes PP-OCRv5_mobile_rec along with the related server_rec support, so it best represents the duplicated model-support work.", "confidence": 0.95, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43907", "reason": "Both only update expected outputs for different model tests; they do not fix the same concrete bug or change.", "right": "pull_request:43910"}, {"accept": false, "left": "pull_request:42230", "reason": "One fixes an attention-mask code path for XPU, the other adds XPU test support for a different model; these are unrelated changes.", "right": "pull_request:45212"}, {"accept": false, "left": "pull_request:43740", "reason": "Both remove PT 2.4+ compatibility code, but they are broad cleanup PRs touching different code paths and scopes, not a single duplicate fix.", "right": "pull_request:43787"}, {"accept": false, "left": "pull_request:42230", "reason": "Different targets: XPU attention-mask handling versus xpu expectations for olmo_hybrid tests.", "right": "pull_request:44353"}, {"accept": false, "left": "pull_request:42230", "reason": "Different targets: an XPU masking fix versus XPU test expectation updates for higgs_audio_v2.", "right": "pull_request:44482"}, {"accept": false, "left": "pull_request:43936", "reason": "Both address failed tests, but for different models and different code/tests; not the same underlying change.", "right": "pull_request:43938"}, {"accept": false, "left": "pull_request:43488", "reason": "Both are 'don't merge' workflow-check PRs, but they touch different test files and are separate intentional-failure changes.", "right": "pull_request:43884"}, {"accept": false, "left": "pull_request:43488", "reason": "Same rationale as 
above: separate intentional workflow-check sabotage PRs, not one concrete duplicate fix.", "right": "pull_request:44034"}, {"accept": false, "left": "pull_request:45209", "reason": "One makes nomic_bert tests device-generic; the other adds XPU test support for musicflamingo. Different models and purposes.", "right": "pull_request:45212"}, {"accept": false, "left": "pull_request:44657", "reason": "Different bug fixes in different models: Electra test config mismatch versus Videomt device placement fix.", "right": "pull_request:45204"}, {"accept": true, "left": "pull_request:43793", "reason": "Both add PP-OCRv5_mobile_rec support; 44808 is the later, broader PR that includes the same mobile-recognizer work plus server-recognizer support.", "right": "pull_request:44808"}], "summary": "Most pairs are not true duplicates: they target different models or unrelated test/workflow changes. The only clear duplicate/overlap is the PP-OCRv5 mobile-recognizer support PR, which is subsumed by the later combined PP-OCRv5 server-recognizer + mobile-recognizer support PR."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43907|pull_request:43910", "pull_request:42230|pull_request:45212", "pull_request:43740|pull_request:43787", "pull_request:42230|pull_request:44353", "pull_request:42230|pull_request:44482", "pull_request:43936|pull_request:43938", "pull_request:43488|pull_request:43884", "pull_request:43488|pull_request:44034", "pull_request:45209|pull_request:45212", "pull_request:44657|pull_request:45204", "pull_request:43793|pull_request:44808"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 22034, "estimated_input_tokens": 10889, "item_count": 18, "node_count": 18, "serialized_chars": 43556, "soft_pair_count": 20}, "cached_at": "2026-04-14T13:45:10Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", 
"evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "e1d2de872efc28b115dc60dce5cfd42be0a1ffd406d30ac3f579158232aa03ac", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:42230", "pull_request:43341", "pull_request:43553", "pull_request:43554", "pull_request:43555", "pull_request:43758", "pull_request:43795", "pull_request:43907", "pull_request:43910", "pull_request:43936", "pull_request:44412", "pull_request:44647", "pull_request:44808", "pull_request:45204", "pull_request:45209", "pull_request:45212", "pull_request:45415", "pull_request:45425"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "44808 is the strongest cluster representative overall because it is merged, broad in scope, and has the most complete code/docs/test integration among the candidate PRs.", "canonical_issue_reason": null, "canonical_pr_reason": "44808 is the best representative merged PR: it is the broadest complete artifact in the only substantial overlapping change-set (PP-OCRv5 server/mobile OCR support) and subsumes the earlier server_rec-only support work.", "confidence": 0.93, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:43795", "reason": "44808 explicitly includes PP-OCRv5_server_rec support and appears to subsume the earlier server_rec-only PR; same underlying model-support change.", "right": "pull_request:44808"}, {"accept": false, "left": "pull_request:45204", "reason": "Different models and different fixes: VideOMT device mismatch in model code vs Nomic BERT test expectation normalization.", "right": "pull_request:45209"}, {"accept": false, "left": "pull_request:42230", "reason": "Generic attention-mask/XPU handling vs a VideOMT model-specific device mismatch fix; not the same concrete change.", "right": "pull_request:45204"}, {"accept": false, "left": 
"pull_request:42230", "reason": "Different code paths and models; the XPU attention-mask fix is unrelated to the Nomic BERT test update.", "right": "pull_request:45209"}, {"accept": false, "left": "pull_request:45204", "reason": "Separate model areas and different goals: VideOMT device fix vs MusicFlamingo XPU test support.", "right": "pull_request:45212"}, {"accept": false, "left": "pull_request:44412", "reason": "Both are typing-related, but they target different scopes and files; this is not the same concrete change.", "right": "pull_request:45415"}, {"accept": false, "left": "pull_request:43341", "reason": "Different models and different test issues: skipping unsupported GLM tests vs updating Jais2 expected logits.", "right": "pull_request:43910"}, {"accept": false, "left": "pull_request:43341", "reason": "Unrelated model test changes; same broad theme of test stabilization is too weak for duplicate triage.", "right": "pull_request:43936"}, {"accept": false, "left": "pull_request:43910", "reason": "Distinct model-specific test updates with no shared code path or underlying bug.", "right": "pull_request:43936"}, {"accept": false, "left": "pull_request:43341", "reason": "Same test file, but different fixes: xfailing unsupported tests vs changing expected outputs and determinism handling.", "right": "pull_request:43907"}, {"accept": false, "left": "pull_request:43907", "reason": "Different models and different fix types; not the same underlying issue.", "right": "pull_request:43936"}, {"accept": false, "left": "pull_request:44647", "reason": "Continuous batching device support and MusicFlamingo XPU test support are unrelated.", "right": "pull_request:45212"}, {"accept": false, "left": "pull_request:43341", "reason": "Different models and different failure modes; no shared concrete bug or change.", "right": "pull_request:45204"}, {"accept": false, "left": "pull_request:43910", "reason": "Jais2 test expectation update and VideOMT device fix are unrelated.", "right": 
"pull_request:45204"}, {"accept": false, "left": "pull_request:43907", "reason": "GLM image test stabilization and VideOMT device mismatch fix are separate model-specific changes.", "right": "pull_request:45204"}, {"accept": false, "left": "pull_request:45415", "reason": "Both concern typing, but 45425 is a narrower follow-up for modeling_utils and device-mesh typing; not the same PR-worthy change.", "right": "pull_request:45425"}, {"accept": true, "left": "pull_request:43553", "reason": "These are sequential edits to the same workflow file and bot configuration; they are part of the same workflow-change series.", "right": "pull_request:43554"}, {"accept": true, "left": "pull_request:43553", "reason": "Same workflow file and same PR-bot permission/configuration effort; 43555 is a direct follow-up to the initial workflow change.", "right": "pull_request:43555"}, {"accept": false, "left": "pull_request:43758", "reason": "Different models and different test stabilization tasks; not the same change.", "right": "pull_request:43910"}, {"accept": true, "left": "pull_request:43554", "reason": "Both are incremental updates to the same pr-style-bot workflow configuration, so they belong to the same underlying change-set.", "right": "pull_request:43555"}], "summary": "This cluster is mostly heterogeneous. 
The only clear true overlaps are the incremental PR-bot workflow edits and the PP-OCRv5_server_rec support series; the rest are distinct model-specific fixes, test updates, or broad-but-separate typing/device changes."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43795|pull_request:44808", "pull_request:45204|pull_request:45209", "pull_request:42230|pull_request:45204", "pull_request:42230|pull_request:45209", "pull_request:45204|pull_request:45212", "pull_request:44412|pull_request:45415", "pull_request:43341|pull_request:43910", "pull_request:43341|pull_request:43936", "pull_request:43910|pull_request:43936", "pull_request:43341|pull_request:43907", "pull_request:43907|pull_request:43936", "pull_request:44647|pull_request:45212", "pull_request:43341|pull_request:45204", "pull_request:43910|pull_request:45204", "pull_request:43907|pull_request:45204", "pull_request:45415|pull_request:45425", "pull_request:43553|pull_request:43554", "pull_request:43553|pull_request:43555", "pull_request:43758|pull_request:43910", "pull_request:43554|pull_request:43555"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 23758, "estimated_input_tokens": 11751, "item_count": 18, "node_count": 18, "serialized_chars": 47001, "soft_pair_count": 21}, "cached_at": "2026-04-14T13:45:44Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "ab9d4e85afc5b4ec9c79e07d36c715e1eb4b3086f867b86ab5d443369d028d9f", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43098", "pull_request:43247", "pull_request:43274", "pull_request:43345", "pull_request:43486", "pull_request:43500", "pull_request:43532", 
"pull_request:43554", "pull_request:43558", "pull_request:43563", "pull_request:43588", "pull_request:43767", "pull_request:43884", "pull_request:44034", "pull_request:44125", "pull_request:44544", "pull_request:44808", "pull_request:45078"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 44808 is the best overall PR representative here because it is the hub of the OCRv5 family and the most complete, merged model-support change among the related submissions.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 44808 is the most central representative of the largest cohesive subcluster (PP-OCRv5 support), with inbound references and the broadest related model/auto-registry changes.", "confidence": 0.88, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43884", "reason": "Both are test/workflow prank-style PRs, but they touch different files and different injected changes; they are not the same underlying fix.", "right": "pull_request:44034"}, {"accept": false, "left": "pull_request:43486", "reason": "Unrelated fixes: one changes batched video handling, the other adjusts loading-report ANSI styling. Same small-PR shape, different bugs/code paths.", "right": "pull_request:44544"}, {"accept": false, "left": "pull_request:43098", "reason": "Both are PP model-support additions, but for different models (DocLayoutV3 vs OCRv5 mobile/server rec). Similar scaffold, not the same change.", "right": "pull_request:44808"}, {"accept": false, "left": "pull_request:43247", "reason": "Different model families and different concrete additions (PPLCNet/PP-OCRv5 mobile det vs OCRv5 server/mobile rec). 
Not mergeable into one PR.", "right": "pull_request:44808"}, {"accept": false, "left": "pull_request:43500", "reason": "Same test file appears, but the inserted content and surrounding intent differ; these are separate dummy changes, not one duplicated fix.", "right": "pull_request:43884"}, {"accept": false, "left": "pull_request:43500", "reason": "Both are test-file edits, but they target different PRs and different inserted content; no evidence of the same underlying bug/change.", "right": "pull_request:44034"}, {"accept": false, "left": "pull_request:43588", "reason": "Different code paths: Qwen3 Omni video/image feature typing vs auto video-processor loading error handling. Not the same fix.", "right": "pull_request:44125"}, {"accept": false, "left": "pull_request:44125", "reason": "Video processor error handling and tokenizer conversion/error handling are unrelated subsystems and changes.", "right": "pull_request:45078"}, {"accept": false, "left": "pull_request:44125", "reason": "Video processor loading and loading-report ANSI formatting are unrelated changes.", "right": "pull_request:44544"}, {"accept": false, "left": "pull_request:43563", "reason": "Same test file, but the diffs are different dummy modifications; not a duplicate underlying change.", "right": "pull_request:43884"}, {"accept": false, "left": "pull_request:43563", "reason": "They share a test file and prank-style edits, but the inserted assertions/lines differ and the PRs target different checks.", "right": "pull_request:44034"}, {"accept": false, "left": "pull_request:43554", "reason": "Both are bot-permission/style-check related, but they modify different workflow settings and are not the same concrete change.", "right": "pull_request:43558"}, {"accept": true, "left": "pull_request:43558", "reason": "These appear to be the same exact test-file patch (same inserted lines in the same location), so they are duplicate submissions of the same change.", "right": "pull_request:43563"}, {"accept": 
false, "left": "pull_request:43274", "reason": "Both are OCRv5-related model-support PRs, but one adds server_det while the other adds server_rec/mobile_rec. Similar family, different concrete models.", "right": "pull_request:44808"}, {"accept": false, "left": "pull_request:43345", "reason": "PP-LCNet support and PP-OCRv5 support are different model additions; overlap is only in shared registry/docs boilerplate.", "right": "pull_request:44808"}, {"accept": false, "left": "pull_request:43767", "reason": "PP-Chart2Table and PP-OCRv5 are different model families; shared auto/docs updates are boilerplate, not the same change.", "right": "pull_request:44808"}, {"accept": false, "left": "pull_request:43500", "reason": "One is a test-file edit, the other is a workflow YAML change. They are not the same underlying fix.", "right": "pull_request:43532"}, {"accept": false, "left": "pull_request:43098", "reason": "Different model-support PRs for different models, despite similar registry/docs boilerplate.", "right": "pull_request:43247"}, {"accept": false, "left": "pull_request:43098", "reason": "DocLayoutV3 and OCRv5_server_det are different model additions with different code paths.", "right": "pull_request:43274"}, {"accept": false, "left": "pull_request:43098", "reason": "Different model families (DocLayoutV3 vs PPLCNet); shared scaffolding is not enough to treat them as duplicates.", "right": "pull_request:43345"}, {"accept": false, "left": "pull_request:43098", "reason": "Different model families and implementations; only the docs/auto wiring pattern is shared.", "right": "pull_request:43767"}], "summary": "This is a mixed PR cluster: several unrelated model-support additions, a few workflow/test-only PRs, and one near-exact duplicate pair. 
Only the two identical style-check test PRs look like true duplicates; the rest are similar by template or subsystem but change different models/code paths."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43884|pull_request:44034", "pull_request:43486|pull_request:44544", "pull_request:43098|pull_request:44808", "pull_request:43247|pull_request:44808", "pull_request:43500|pull_request:43884", "pull_request:43500|pull_request:44034", "pull_request:43588|pull_request:44125", "pull_request:44125|pull_request:45078", "pull_request:44125|pull_request:44544", "pull_request:43563|pull_request:43884", "pull_request:43563|pull_request:44034", "pull_request:43554|pull_request:43558", "pull_request:43558|pull_request:43563", "pull_request:43274|pull_request:44808", "pull_request:43345|pull_request:44808", "pull_request:43767|pull_request:44808", "pull_request:43500|pull_request:43532", "pull_request:43098|pull_request:43247", "pull_request:43098|pull_request:43274", "pull_request:43098|pull_request:43345", "pull_request:43098|pull_request:43767"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 16080, "estimated_input_tokens": 7912, "item_count": 17, "node_count": 17, "serialized_chars": 31647, "soft_pair_count": 18}, "cached_at": "2026-04-14T13:46:10Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "f683b619c634a3f4714b3909278a0bed7df0d06de2e59341e819914bb65f042e", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43098", "pull_request:43247", "pull_request:43274", "pull_request:43345", "pull_request:43445", "pull_request:43532", "pull_request:43767", 
"pull_request:43793", "pull_request:43795", "pull_request:43884", "pull_request:44034", "pull_request:44053", "pull_request:44413", "pull_request:44827", "pull_request:45286", "pull_request:45287", "pull_request:45288"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "43767 is the cleanest representative of substantive model-support work because it combines docs, auto-registration, and conversion-mapping plumbing, but it still does not duplicate the other PRs.", "canonical_issue_reason": null, "canonical_pr_reason": "No true canonical PR: the items do not share one underlying bug or change; they are separate PRs for different models, workflows, or fixes.", "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43098", "reason": "Different model additions: PP-DocLayoutV3 vs PP-OCRv5_mobile_rec. Same boilerplate files, but unrelated changes.", "right": "pull_request:43793"}, {"accept": false, "left": "pull_request:43098", "reason": "Different model additions: PP-DocLayoutV3 vs PP-OCRv5_server_rec. Not the same code-path or bug.", "right": "pull_request:43795"}, {"accept": false, "left": "pull_request:43247", "reason": "Different variants: PP-OCRv5_mobile_det vs PP-OCRv5_server_det. Separate model support PRs, not one fix.", "right": "pull_request:43274"}, {"accept": false, "left": "pull_request:43247", "reason": "Different models: PP-OCRv5_mobile_det vs PP-LCNet. Shared framework plumbing is incidental.", "right": "pull_request:43345"}, {"accept": false, "left": "pull_request:43247", "reason": "Different model families: PP-OCRv5_mobile_det vs PP-Chart2Table. 
Not the same underlying change.", "right": "pull_request:43767"}, {"accept": false, "left": "pull_request:43247", "reason": "Detector vs recognizer: PP-OCRv5_mobile_det and PP-OCRv5_mobile_rec are distinct model components and code paths.", "right": "pull_request:43793"}, {"accept": false, "left": "pull_request:43247", "reason": "Different OCRv5 variants and tasks: mobile_det vs server_rec. Not mergeable as one duplicate fix.", "right": "pull_request:43795"}, {"accept": false, "left": "pull_request:43274", "reason": "Different models: PP-OCRv5_server_det vs PP-LCNet. Same repo area, different support work.", "right": "pull_request:43345"}, {"accept": false, "left": "pull_request:43274", "reason": "Different model additions: PP-OCRv5_server_det vs PP-Chart2Table. Separate PRs with different code paths.", "right": "pull_request:43767"}, {"accept": false, "left": "pull_request:43274", "reason": "Different tasks: PP-OCRv5_server_det vs PP-OCRv5_mobile_rec. One is detection, the other recognition.", "right": "pull_request:43793"}, {"accept": false, "left": "pull_request:43274", "reason": "Detector vs recognizer for different OCRv5 variants; these are separate model support changes.", "right": "pull_request:43795"}, {"accept": false, "left": "pull_request:43532", "reason": "Unrelated changes: workflow/config edits versus a test sabotage PR. Different intent and files.", "right": "pull_request:43884"}, {"accept": false, "left": "pull_request:43532", "reason": "Workflow change vs test-file edits in CLIP/ViT. Not the same bug or code path.", "right": "pull_request:44034"}, {"accept": false, "left": "pull_request:43445", "reason": "Both touch MoE, but they fix different issues: conversion mapping/model integration versus grouped-linear behavior and Mistral4 tests.", "right": "pull_request:44827"}, {"accept": false, "left": "pull_request:45286", "reason": "Different model test files (nomic_bert vs videomt). 
These are unrelated auto-fix PRs.", "right": "pull_request:45287"}, {"accept": false, "left": "pull_request:45286", "reason": "Different model test targets: Nomic BERT vs Cohere ASR. No shared underlying bug.", "right": "pull_request:45288"}, {"accept": false, "left": "pull_request:45287", "reason": "Different model test suites and failures: Videomt vs Cohere ASR. Not duplicates.", "right": "pull_request:45288"}, {"accept": false, "left": "pull_request:44053", "reason": "Both are peft.py fixes, but they address different mapping bugs in different lines/functions; too small and distinct to merge as one PR.", "right": "pull_request:44413"}], "summary": "This cluster is heterogeneous: separate model-support PRs for different PP models, unrelated CI/test manipulation PRs, and a few distinct small bug fixes. No soft pair looks like the same underlying change, so none should be merged as duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43098|pull_request:43793", "pull_request:43098|pull_request:43795", "pull_request:43247|pull_request:43274", "pull_request:43247|pull_request:43345", "pull_request:43247|pull_request:43767", "pull_request:43247|pull_request:43793", "pull_request:43247|pull_request:43795", "pull_request:43274|pull_request:43345", "pull_request:43274|pull_request:43767", "pull_request:43274|pull_request:43793", "pull_request:43274|pull_request:43795", "pull_request:43532|pull_request:43884", "pull_request:43532|pull_request:44034", "pull_request:43445|pull_request:44827", "pull_request:45286|pull_request:45287", "pull_request:45286|pull_request:45288", "pull_request:45287|pull_request:45288", "pull_request:44053|pull_request:44413"], "split": true, "trimmed": true} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 15892, "estimated_input_tokens": 7818, "item_count": 18, "node_count": 18, "serialized_chars": 31271, "soft_pair_count": 
23}, "cached_at": "2026-04-14T13:46:35Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "c5c81fe79f648ed32ca522eb4b789358f1ed7e32d5e18f1e17501f85c29c5425", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:41145", "pull_request:42227", "pull_request:43098", "pull_request:43247", "pull_request:43274", "pull_request:43291", "pull_request:43345", "pull_request:43385", "pull_request:43707", "pull_request:44395", "pull_request:44413", "pull_request:44542", "pull_request:44634", "pull_request:45286", "pull_request:45287", "pull_request:45288", "pull_request:45403", "pull_request:45410"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 43707 is the most representative completed PR in this batch: it is merged, substantial, and clearly self-contained model-support work.", "canonical_issue_reason": null, "canonical_pr_reason": "No single PR is a duplicate canonical for the whole set; if one representative is needed, PR 43707 is the strongest generic model-support exemplar because it is merged and has broad registry/docs updates.", "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44395", "reason": "Different fixes in different paths: kernel security/loading vs backend dependency for a tokenizer model.", "right": "pull_request:44542"}, {"accept": false, "left": "pull_request:44413", "reason": "PEFT conversion mapping cleanup is unrelated to the backend dependency fix.", "right": "pull_request:44542"}, {"accept": false, "left": "pull_request:44542", "reason": "Backend dependency and LFM2 kernel-path fixes address different bugs and code paths.", "right": "pull_request:44634"}, {"accept": false, "left": "pull_request:42227", "reason": "Both are 
Whisper-related, but one fixes return-language behavior in the ASR pipeline while the other fixes Whisper tests/tokenization; not the same underlying bug.", "right": "pull_request:43291"}, {"accept": false, "left": "pull_request:45286", "reason": "Different models and code paths: Nomic BERT tests vs AltCLIP model logic.", "right": "pull_request:45410"}, {"accept": false, "left": "pull_request:45287", "reason": "VideoMT test cleanup is unrelated to the AltCLIP model fix.", "right": "pull_request:45410"}, {"accept": false, "left": "pull_request:45288", "reason": "Cohere ASR test cleanup is unrelated to the AltCLIP model fix.", "right": "pull_request:45410"}, {"accept": false, "left": "pull_request:45403", "reason": "Common test adjustments do not match the AltCLIP forward-path fix.", "right": "pull_request:45410"}, {"accept": false, "left": "pull_request:45286", "reason": "Different test files for different models; only share the generic 'failing tests' theme.", "right": "pull_request:45403"}, {"accept": false, "left": "pull_request:45287", "reason": "Different model test files and likely unrelated failures.", "right": "pull_request:45403"}, {"accept": false, "left": "pull_request:45288", "reason": "Different model test files and likely unrelated failures.", "right": "pull_request:45403"}, {"accept": false, "left": "pull_request:43385", "reason": "Both are model-support PRs, but they add support for different models (UVDoc vs SLANeXt) and cannot be merged as one change.", "right": "pull_request:43707"}, {"accept": false, "left": "pull_request:43098", "reason": "Different model-support additions: PP-DocLayoutV3 vs UVDoc.", "right": "pull_request:43385"}, {"accept": false, "left": "pull_request:43098", "reason": "Different model-support additions: PP-DocLayoutV3 vs SLANeXt.", "right": "pull_request:43707"}, {"accept": false, "left": "pull_request:43247", "reason": "Different model-support additions: PP-OCRv5_mobile_det vs UVDoc.", "right": "pull_request:43385"}, 
{"accept": false, "left": "pull_request:43247", "reason": "Different model-support additions: PP-OCRv5_mobile_det vs SLANeXt.", "right": "pull_request:43707"}, {"accept": false, "left": "pull_request:43274", "reason": "Different model-support additions: PP-OCRv5_server_det vs UVDoc.", "right": "pull_request:43385"}, {"accept": false, "left": "pull_request:43274", "reason": "Different model-support additions: PP-OCRv5_server_det vs SLANeXt.", "right": "pull_request:43707"}, {"accept": false, "left": "pull_request:43345", "reason": "Different model-support additions: PP-LCNet vs UVDoc.", "right": "pull_request:43385"}, {"accept": false, "left": "pull_request:43345", "reason": "Different model-support additions: PP-LCNet vs SLANeXt.", "right": "pull_request:43707"}, {"accept": false, "left": "pull_request:41145", "reason": "Qwen3 Rope kernel support and UVDoc model support are unrelated changes.", "right": "pull_request:43385"}, {"accept": false, "left": "pull_request:41145", "reason": "Qwen3 Rope kernel support and SLANeXt model support are unrelated changes.", "right": "pull_request:43707"}, {"accept": false, "left": "pull_request:41145", "reason": "Qwen3 Rope kernel support and PP-DocLayoutV3 support are unrelated changes.", "right": "pull_request:43098"}], "summary": "These PRs form several superficial similarity groups (model support additions, kernel-related fixes, whisper/test fixes), but they target different concrete models or code paths and are not true duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44395|pull_request:44542", "pull_request:44413|pull_request:44542", "pull_request:44542|pull_request:44634", "pull_request:42227|pull_request:43291", "pull_request:45286|pull_request:45410", "pull_request:45287|pull_request:45410", "pull_request:45288|pull_request:45410", "pull_request:45403|pull_request:45410", "pull_request:45286|pull_request:45403", 
"pull_request:45287|pull_request:45403", "pull_request:45288|pull_request:45403", "pull_request:43385|pull_request:43707", "pull_request:43098|pull_request:43385", "pull_request:43098|pull_request:43707", "pull_request:43247|pull_request:43385", "pull_request:43247|pull_request:43707", "pull_request:43274|pull_request:43385", "pull_request:43274|pull_request:43707", "pull_request:43345|pull_request:43385", "pull_request:43345|pull_request:43707", "pull_request:41145|pull_request:43385", "pull_request:41145|pull_request:43707", "pull_request:41145|pull_request:43098"], "split": true, "trimmed": true} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 12964, "estimated_input_tokens": 6354, "item_count": 11, "node_count": 11, "serialized_chars": 25413, "soft_pair_count": 6}, "cached_at": "2026-04-14T13:47:04Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "67b6608f0006c7dc67d340ce7c10c4fca44f559ee89e0d3429f9772f00316a51", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43291", "pull_request:43385", "pull_request:43647", "pull_request:43648", "pull_request:43665", "pull_request:43707", "pull_request:44053", "pull_request:44571", "pull_request:44815", "pull_request:44994", "pull_request:45410"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 43707 is the strongest standalone PR in the set: it is merged, substantial, and self-contained (docs, auto-registry, implementation, and tests for SLANeXt). 
It still does not represent the rest of the cluster as duplicates.", "canonical_issue_reason": null, "canonical_pr_reason": "No single canonical PR: the items split across unrelated topics (UVDoc/SLANeXt/sarvam model support, Whisper/AltCLIP/CLIP/ViT test tweaks, workflow edits, and PEFT/Pegasus/fp8 fixes).", "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44053", "reason": "Different fixes in different code paths: PEFT weight-mapping typo vs Pegasus conversion/dependency changes. Same broad area (conversion) but not the same bug or mergeable change.", "right": "pull_request:44571"}, {"accept": false, "left": "pull_request:43647", "reason": "Both touch the self-comment CI workflow, but the diffs are not the same fix: one comments out concurrency while the other removes it and changes job gating. Too different to treat as duplicates.", "right": "pull_request:43648"}, {"accept": false, "left": "pull_request:43385", "reason": "Both are model-support PRs, but for different models (UVDoc vs sarvam) and different files/code paths. Shared framework work is not enough for duplicate status.", "right": "pull_request:44994"}, {"accept": false, "left": "pull_request:43707", "reason": "Different model additions with different implementations and tests. They are not the same underlying change.", "right": "pull_request:44994"}, {"accept": false, "left": "pull_request:43291", "reason": "Whisper tokenizer test fix vs AltCLIP failing-test fix; unrelated models and code paths.", "right": "pull_request:45410"}, {"accept": false, "left": "pull_request:43665", "reason": "Test-only edits in CLIP/ViT are not the same as the FP8/core loading dequantization fix. Different problems and patch scopes.", "right": "pull_request:44815"}], "summary": "The cluster is a mix of unrelated PRs: several model-support additions, a few workflow/test-only changes, and separate bug/dependency fixes. 
They share superficial tooling or model-repo overlap, but not the same underlying change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44053|pull_request:44571", "pull_request:43647|pull_request:43648", "pull_request:43385|pull_request:44994", "pull_request:43707|pull_request:44994", "pull_request:43291|pull_request:45410", "pull_request:43665|pull_request:44815"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2356, "estimated_input_tokens": 1050, "item_count": 2, "node_count": 2, "serialized_chars": 4199, "soft_pair_count": 1}, "cached_at": "2026-04-14T13:47:10Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "ced90945d9dcb0301c59af02e97cbacc56e0b231202c18a9acf46aea663132d9", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:39144", "pull_request:39177"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #39177 is the strongest cluster representative because it is merged and addresses a concrete, well-scoped code-path issue in Trainer rather than a separate quantizer-specific device-selection bug.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #39177 is the better canonical representative: it is merged, has higher review activity, and its diff targets a clear trainer/FSDP device-placement bug.", "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:39144", "reason": "Reject: these fix different problems in different code paths. 
#39144 changes GPTQ quantizer device-map defaults for XPU/CUDA, while #39177 changes Trainer logic for delayed optimizer/model preparation under FSDP v1.", "right": "pull_request:39177"}], "summary": "Two unrelated pull requests: one fixes GPTQ device selection on XPU, the other fixes model/device handling in Trainer for FSDP v1. They share only a broad \"device\" theme, not the same bug."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:39144|pull_request:39177"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 5786, "estimated_input_tokens": 2765, "item_count": 4, "node_count": 4, "serialized_chars": 11060, "soft_pair_count": 4}, "cached_at": "2026-04-14T13:47:27Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "75b97377c681cfcf347fd6007e57b7ebcb5995ce3f7a0febe12570fb4ae59b1f", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:39722", "pull_request:43174", "pull_request:43458", "pull_request:43538"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "#43538 is the strongest single representative because it is merged, broadest in scope, and subsumes the Music Flamingo changes seen in the other PRs.", "canonical_issue_reason": null, "canonical_pr_reason": "#43538 is the merged, final Music Flamingo PR and includes the full model integration plus docs/auto-class wiring, making it the best canonical representative of the cluster.", "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:39722", "reason": "Intern-S1 and Music Flamingo are unrelated model additions; no shared code path or concrete bug/change. 
Same broad repo area only.", "right": "pull_request:43458"}, {"accept": true, "left": "pull_request:43174", "reason": "Both are Music Flamingo PRs with matching feature intent and effectively the same change set; #43538 looks like the finalized/merged version of the same model integration.", "right": "pull_request:43538"}, {"accept": true, "left": "pull_request:43174", "reason": "Both target Music Flamingo integration, and the overlap in model files/tests indicates the same underlying feature work that could plausibly be merged into one PR.", "right": "pull_request:43458"}, {"accept": true, "left": "pull_request:43458", "reason": "They share the same AudioFlamingo3/MusicFlamingo code paths and tests, with #43538 extending the same concrete model support into the final merged feature PR.", "right": "pull_request:43538"}], "summary": "Three PRs are clear Music Flamingo iterations/duplicates, with #43538 as the merged final implementation; #39722 (Intern-S1) is unrelated and should stay separate."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:39722|pull_request:43458", "pull_request:43174|pull_request:43538", "pull_request:43174|pull_request:43458", "pull_request:43458|pull_request:43538"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 3224, "estimated_input_tokens": 1484, "item_count": 2, "node_count": 2, "serialized_chars": 5933, "soft_pair_count": 1}, "cached_at": "2026-04-14T13:47:45Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "8195b30365cb35c9ffca6a801852ebf8bd8d4d2d41ad89deb9ebb45ed8fc16a8", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:40962", 
"pull_request:43166"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 43166 is the better single representative because it is merged and complete, but it still addresses a different model than PR 40962.", "canonical_issue_reason": null, "canonical_pr_reason": "No canonical PR: the PRs implement different models with distinct code paths and are not duplicates.", "confidence": 0.98, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:40962", "reason": "Different model implementations for Isaac and Youtu-LLM. Shared docs/auto-file edits are routine onboarding scaffolding, not a shared bug or change that could be merged into one PR.", "right": "pull_request:43166"}], "summary": "These are two separate model-addition PRs for different architectures (Isaac-0.1 vs Youtu-LLM). The overlap in shared auto-registration/docs files is just model onboarding boilerplate, not the same underlying change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:40962|pull_request:43166"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 3994, "estimated_input_tokens": 1869, "item_count": 3, "node_count": 3, "serialized_chars": 7475, "soft_pair_count": 2}, "cached_at": "2026-04-14T13:47:57Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "facffb8fc7d34192b2f2eb329b65554aaa69b02026cfa9e1140a5d14527c9d15", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:41251", "pull_request:43912", "pull_request:44797"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #41251 is the best representative PR only by breadth and 
completeness; the other two are narrower, unrelated maintenance/fix changes.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #41251 is the broadest self-contained feature addition in the set, but it is not actually related to the other two PRs.", "confidence": 0.99, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:41251", "reason": "Different targets and code paths: DeepSeek 3.2 model introduction vs GlmMoeDsa implementation/docstring cleanup. They do not appear to fix or implement the same change.", "right": "pull_request:43912"}, {"accept": false, "left": "pull_request:41251", "reason": "Unrelated functionality: adding a new model family versus enabling FlashAttention 4 kernel fallback in attention utilities. No shared concrete bug or mergeable change.", "right": "pull_request:44797"}], "summary": "All three PRs are unrelated: one adds DeepSeek 3.2 support, one is a GlmMoeDsa config/model cleanup and test update, and one adds FlashAttention 4 kernel fallback support. 
The soft-similarity links are not enough to merge any pair."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:41251|pull_request:43912", "pull_request:41251|pull_request:44797"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2534, "estimated_input_tokens": 1139, "item_count": 2, "node_count": 2, "serialized_chars": 4555, "soft_pair_count": 1}, "cached_at": "2026-04-14T13:48:04Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "763b76132c86ade58b0b2972278acf2f0ddf43967923bcd38095ff7f7a02e19e", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:41307", "pull_request:42135"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #41307 is the best representative because it implements the concrete version requirement change, whereas PR #42135 only updates documentation.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #41307 is the canonical change: it updates the Torch minimum in package/runtime metadata and multiple code-facing files, making it the substantive dependency bump.", "confidence": 0.93, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:41307", "reason": "They are related, but not the same underlying change. #41307 raises the minimum Torch version in code and packaging files; #42135 only adjusts installation docs. They would not plausibly be merged as one PR.", "right": "pull_request:42135"}], "summary": "These are related version bumps, but they are not the same change: one is the actual minimum Torch requirement update across package/runtime files, the other is a later docs-only note. 
Keep them separate."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:41307|pull_request:42135"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 3224, "estimated_input_tokens": 1484, "item_count": 2, "node_count": 2, "serialized_chars": 5934, "soft_pair_count": 1}, "cached_at": "2026-04-14T13:48:16Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "a82632330ca4f7de455cf3edfe32fa6b4e6e9868a6b84f168bb40df55432c4f1", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:41356", "pull_request:44339"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "#44339 is the best single PR to keep as canonical because it covers the full concrete code-path change, not just the initial scaffold.", "canonical_issue_reason": null, "canonical_pr_reason": "#44339 is the most complete DEIMv2 implementation in the cluster: it includes the model plus auto mappings, loss utilities, conversion script, modular code, and updated docs/tests. #41356 is a smaller earlier subset.", "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:41356", "reason": "Related but not the same underlying change. #41356 adds the basic DEIMv2 model/image processor and tests, while #44339 substantially extends the feature with loss code, auto integration, conversion tooling, modular implementation, and config updates. 
They could not plausibly be merged into one PR without losing the distinction between an initial landing and a later expansion.", "right": "pull_request:44339"}], "summary": "Both PRs target the same DEIMv2 tracker issue, but they are not duplicates: #41356 is an initial model/image-processor landing, while #44339 is a much broader follow-up adding loss, auto-class wiring, conversion, and modularization. The later PR is the better representative of the full feature, but the pair should not be merged as the same change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:41356|pull_request:44339"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 3422, "estimated_input_tokens": 1583, "item_count": 2, "node_count": 2, "serialized_chars": 6329, "soft_pair_count": 1}, "cached_at": "2026-04-14T13:48:28Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "2347d43ee7dac3845cb0e4fb095871adad49f18cbf822a1c203e9e512a2a362c", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:41545", "pull_request:45176"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #45176 is the more self-contained, broader model integration (docs, config, modeling, processing, tests) and is the best representative PR in this set, though it is not a duplicate of #41545.", "canonical_issue_reason": null, "canonical_pr_reason": "No canonical PR for this cluster; the two PRs address different models and different code paths, so neither is a duplicate of the other.", "confidence": 0.98, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:41545", "reason": "Different model 
integrations and domains (Parakeet TDT speech vs EfficientViTSAM vision). Shared auto files are generic registry updates, not evidence of the same fix/change.", "right": "pull_request:45176"}], "summary": "These PRs are unrelated model additions: one adds Parakeet TDT ASR support, the other adds EfficientViTSAM vision/segmentation support. They only overlap on shared auto-registration plumbing, not on the underlying change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:41545|pull_request:45176"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 8944, "estimated_input_tokens": 4344, "item_count": 7, "node_count": 7, "serialized_chars": 17376, "soft_pair_count": 8}, "cached_at": "2026-04-14T13:49:19Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "95a1a7631084794b69d4a84e24e9215765ebf0ae2fe36eb16ad690fca14ad2a4", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:41797", "pull_request:43695", "pull_request:43780", "pull_request:43853", "pull_request:43882", "pull_request:44607", "pull_request:45186"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 43695 is the best cluster representative because it is a concise, merged fix in the only near-duplicate pair and is more central than the unrelated model-addition PRs.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 43695 is the clearest representative of the only closely related sub-thread: a focused GPT-OSS tensor-parallel crash fix with the most generic title and the strongest overlap with another candidate.", "confidence": 0.88, "soft_edge_verdicts": [{"accept": false, 
"left": "pull_request:41797", "reason": "Both are model-introduction PRs, but for different models and unrelated code paths; shared docs/auto-registration files are too broad to imply duplication.", "right": "pull_request:45186"}, {"accept": true, "left": "pull_request:43695", "reason": "Both target the same GPT-OSS tensor-parallel crash and make small, plausibly mergeable fixes in that path; the titles and scope are closely aligned.", "right": "pull_request:43853"}, {"accept": false, "left": "pull_request:43695", "reason": "GPT-OSS tensor-parallel config fix vs Trainer/DeepSpeed preparation logic; different subsystems and different bug classes.", "right": "pull_request:43780"}, {"accept": false, "left": "pull_request:43695", "reason": "GPT-OSS TP crash fix and Llama4 image-feature output-key fix are unrelated model-specific bugs.", "right": "pull_request:43882"}, {"accept": false, "left": "pull_request:43780", "reason": "Trainer DeepSpeed preparation logic and GPT-OSS TP crash handling do not describe the same concrete bug or code path.", "right": "pull_request:43853"}, {"accept": false, "left": "pull_request:43853", "reason": "One is a GPT-OSS tensor-parallel fix; the other changes Llama4 image feature output handling. 
No shared underlying defect.", "right": "pull_request:43882"}, {"accept": false, "left": "pull_request:43882", "reason": "Llama4 output-type adjustment and a new Isaac model addition are unrelated changes.", "right": "pull_request:45186"}, {"accept": false, "left": "pull_request:44607", "reason": "Idefics/smolvlm generation-cache behavior vs a new Isaac model addition are unrelated and cannot be merged as the same fix.", "right": "pull_request:45186"}], "summary": "Mostly a heterogeneous cluster: two GPT-OSS tensor-parallel crash fixes are plausibly the same change family, while the remaining PRs are unrelated model additions or independent fixes in other subsystems."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:41797|pull_request:45186", "pull_request:43695|pull_request:43853", "pull_request:43695|pull_request:43780", "pull_request:43695|pull_request:43882", "pull_request:43780|pull_request:43853", "pull_request:43853|pull_request:43882", "pull_request:43882|pull_request:45186", "pull_request:44607|pull_request:45186"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2552, "estimated_input_tokens": 1148, "item_count": 2, "node_count": 2, "serialized_chars": 4589, "soft_pair_count": 1}, "cached_at": "2026-04-14T13:49:32Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "d5cfcfaf718f7806a57c126a5eb65da9c80ed8b2fd5f0e96d9563584d984cda3", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:42136", "pull_request:44159"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 42136 is the strongest representative artifact: merged, higher 
activity, and covers the shared OWL-ViT SDPA work plus OWLv2 updates.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 42136 is the better canonical PR because it is already merged and appears to be the broader completed change, touching both OWL-ViT and OWLv2.", "confidence": 0.87, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:42136", "reason": "Shared issue target and files, but the diffs are not the same concrete fix: 44159 adds Flash Attention/SDPA support for OWL-ViT, while 42136 is a separate earlier merged PR that also updates OWLv2. Not safe to collapse into one PR.", "right": "pull_request:44159"}], "summary": "Both PRs target the same tracking issue (28103) and overlap on OWL-ViT attention support, but the later open PR expands to Flash Attention/SDPA support while the earlier merged PR also touches OWLv2. They look related, not like the same mergeable code change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:42136|pull_request:44159"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 19800, "estimated_input_tokens": 9772, "item_count": 15, "node_count": 15, "serialized_chars": 39085, "soft_pair_count": 24}, "cached_at": "2026-04-14T13:50:18Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "61b122dd3497b06b0b9081781a223c272b911294ccbfa2c8047e51843df96e64", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:42166", "pull_request:42504", "pull_request:43177", "pull_request:43331", "pull_request:44711", "pull_request:44904", "pull_request:45019", "pull_request:45079", "pull_request:45211", "pull_request:45334", 
"pull_request:45345", "pull_request:45351", "pull_request:45352", "pull_request:45371", "pull_request:45426"], "result": {"analyst_result": {"best_issue_reason": "No issue artifact is available in this cluster.", "best_pr_reason": "PR 45079 is the best overall representative because it is a closed, merged, well-scoped bug fix with tests and the clearest concrete code-path impact.", "canonical_issue_reason": "No canonical issue node is present in this cluster; all items are pull requests.", "canonical_pr_reason": "PR 45079 is the strongest representative of the cluster\u2019s accepted duplicate set: it is merged, issue-linked, and fixes a concrete runtime bug in resize_token_embeddings/post_init with tests.", "confidence": 0.92, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:45211", "reason": "Same Qwen3Moe forward return-annotation fix on the same code path; 45352 is a later variant of the same change.", "right": "pull_request:45352"}, {"accept": true, "left": "pull_request:44711", "reason": "Both target the resize_token_embeddings/post_init LM-head reinitialization problem in modeling_utils and are plausibly mergeable into one fix.", "right": "pull_request:45079"}, {"accept": true, "left": "pull_request:44904", "reason": "Both address the Granite multiplier-field int/float validation issue in the same underlying config path.", "right": "pull_request:45019"}, {"accept": true, "left": "pull_request:45351", "reason": "Same CUDA availability guard for get_device_capability in testing_utils; these are alternate implementations of the same bug fix.", "right": "pull_request:45371"}, {"accept": false, "left": "pull_request:45211", "reason": "Unrelated: Qwen3MoE annotation fix vs adding a new AX-K1 model package and auto-registration.", "right": "pull_request:45334"}, {"accept": false, "left": "pull_request:45211", "reason": "Unrelated: Qwen3MoE bug fix vs another AX-K1 feature-add PR.", "right": "pull_request:45426"}, {"accept": false, "left": 
"pull_request:45211", "reason": "Different subsystems and bugs: Qwen3MoE return annotation vs Llama tokenizer byte-level handling.", "right": "pull_request:45345"}, {"accept": false, "left": "pull_request:43177", "reason": "Cache reordering device fix is unrelated to the AX-K1 model addition.", "right": "pull_request:45334"}, {"accept": false, "left": "pull_request:43177", "reason": "Unrelated: cache reordering bug fix vs AX-K1 feature work.", "right": "pull_request:45426"}, {"accept": false, "left": "pull_request:43177", "reason": "Different code paths and problems: cache device handling vs tokenizer format detection.", "right": "pull_request:45345"}, {"accept": false, "left": "pull_request:42166", "reason": "InternVLFlash model addition/docs are unrelated to the AX-K1 feature add.", "right": "pull_request:45334"}, {"accept": false, "left": "pull_request:42166", "reason": "Different model families and change sets; both are feature additions but not the same underlying change.", "right": "pull_request:45426"}, {"accept": false, "left": "pull_request:42166", "reason": "Model addition vs Qwen3MoE annotation fix; no shared bug or change.", "right": "pull_request:45211"}, {"accept": false, "left": "pull_request:42166", "reason": "Unrelated: model addition/docs vs cache device reordering bug fix.", "right": "pull_request:43177"}, {"accept": false, "left": "pull_request:45019", "reason": "Granite config typing fix and Qwen3MoE annotation fix are unrelated.", "right": "pull_request:45211"}, {"accept": false, "left": "pull_request:42166", "reason": "InternVLFlash model addition vs Granite config typing fix; different subsystems and bugs.", "right": "pull_request:45019"}, {"accept": false, "left": "pull_request:42166", "reason": "Unrelated: model addition/docs vs LM-head resize/post_init fix.", "right": "pull_request:45079"}, {"accept": false, "left": "pull_request:43177", "reason": "Cache reordering device bug and Granite config typing bug are unrelated.", "right": 
"pull_request:45019"}, {"accept": false, "left": "pull_request:45079", "reason": "Different bugs in different subsystems: LM-head resize/init vs Qwen3MoE return annotation.", "right": "pull_request:45211"}, {"accept": false, "left": "pull_request:43177", "reason": "Cache reordering device fix is unrelated to the LM-head resize/post_init fix.", "right": "pull_request:45079"}, {"accept": false, "left": "pull_request:42166", "reason": "InternVLFlash model addition/docs is unrelated to the YOSO doctest stabilization change.", "right": "pull_request:43331"}, {"accept": false, "left": "pull_request:42166", "reason": "Both are model-addition PRs, but for different model families and not the same underlying feature/change.", "right": "pull_request:42504"}, {"accept": false, "left": "pull_request:45351", "reason": "Testing utility CUDA guard and Qwen3MoE return annotation fix are unrelated.", "right": "pull_request:45352"}], "summary": "This cluster is mostly a mix of unrelated PRs. I only consider four pairs true same-bug variants: Qwen3MoE return annotation, resized LM head/init handling, Granite multiplier int/float handling, and the CUDA-availability guard in testing_utils."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:45211|pull_request:45352", "pull_request:44711|pull_request:45079", "pull_request:44904|pull_request:45019", "pull_request:45351|pull_request:45371", "pull_request:45211|pull_request:45334", "pull_request:45211|pull_request:45426", "pull_request:45211|pull_request:45345", "pull_request:43177|pull_request:45334", "pull_request:43177|pull_request:45426", "pull_request:43177|pull_request:45345", "pull_request:42166|pull_request:45334", "pull_request:42166|pull_request:45426", "pull_request:42166|pull_request:45211", "pull_request:42166|pull_request:43177", "pull_request:45019|pull_request:45211", "pull_request:42166|pull_request:45019", 
"pull_request:42166|pull_request:45079", "pull_request:45019|pull_request:45079", "pull_request:43177|pull_request:45019", "pull_request:45079|pull_request:45211", "pull_request:43177|pull_request:45079", "pull_request:42166|pull_request:43331", "pull_request:42166|pull_request:42504", "pull_request:45351|pull_request:45352"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 12480, "estimated_input_tokens": 6112, "item_count": 10, "node_count": 10, "serialized_chars": 24448, "soft_pair_count": 7}, "cached_at": "2026-04-14T13:50:42Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "171707bac916215775bc271b40c849724676765ad749eb4b6060703f78a78466", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:42504", "pull_request:43067", "pull_request:43177", "pull_request:45211", "pull_request:45280", "pull_request:45334", "pull_request:45351", "pull_request:45371", "pull_request:45426", "pull_request:45427"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "No single cluster-wide canonical PR exists because the items split into multiple unrelated changes. 
If a representative is needed, pull_request:45427 is the clearest duplicate-fix exemplar thanks to the explicit issue target and concise fix in testing_utils.", "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.96, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:45351", "reason": "Same testing_utils crash fix: both guard CUDA device capability access when CUDA is unavailable, and both target issue 45341.", "right": "pull_request:45427"}, {"accept": true, "left": "pull_request:45371", "reason": "Same underlying bug and same file; the diffs are just alternate formulations of the CUDA-availability guard for issue 45341.", "right": "pull_request:45427"}, {"accept": true, "left": "pull_request:45334", "reason": "Duplicate AXK1 feature PRs: same model package/files and same integration work, so they could plausibly be merged as one change.", "right": "pull_request:45426"}, {"accept": false, "left": "pull_request:42504", "reason": "Both add model docs/integration, but they are for different models (ModernVBERT vs Qianfan-OCR) and not the same underlying change.", "right": "pull_request:45280"}, {"accept": false, "left": "pull_request:43177", "reason": "Unrelated fixes: cache reordering device handling versus a Qwen3MoE return type annotation change.", "right": "pull_request:45211"}, {"accept": false, "left": "pull_request:43067", "reason": "Different model integrations (NomicBERT vs Qianfan-OCR); shared auto-registration boilerplate is not enough to call this a duplicate.", "right": "pull_request:45280"}, {"accept": false, "left": "pull_request:42504", "reason": "Different model additions with distinct code paths and model families; only broad Transformers model-registration scaffolding overlaps.", "right": "pull_request:43067"}], "summary": "This cluster is mostly a mix of unrelated PRs, with three true duplicate subclusters: the CUDA availability guard in testing_utils, the AXK1 model addition, and no other confirmed duplicate 
pairs. The remaining pairings are only superficially similar via shared boilerplate or broad subsystem overlap."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:45351|pull_request:45427", "pull_request:45371|pull_request:45427", "pull_request:45334|pull_request:45426", "pull_request:42504|pull_request:45280", "pull_request:43177|pull_request:45211", "pull_request:43067|pull_request:45280", "pull_request:42504|pull_request:43067"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2290, "estimated_input_tokens": 1017, "item_count": 2, "node_count": 2, "serialized_chars": 4065, "soft_pair_count": 1}, "cached_at": "2026-04-14T13:50:55Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "f511122cb5685fe16d10ff07a1dfb751ff37f7aa674b6430a92129998eac38d2", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:42797", "pull_request:43666"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #42797 is the best representative PR here because it addresses an actual code-path bug and is more likely to matter for duplicate triage than the unrelated documentation-only PR.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #42797 is the more substantive change in the cluster: it fixes a real runtime compatibility bug in trainer_pt_utils for BatchFeature inputs. PR #43666 is just a docs typo cleanup and is not the same underlying change.", "confidence": 0.98, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:42797", "reason": "Different changes: #42797 modifies sampler logic to support BatchFeature, while #43666 only edits docs typos. 
They do not fix the same bug and could not plausibly be merged as one PR.", "right": "pull_request:43666"}], "summary": "These two PRs are not duplicates: one fixes LengthGroupedSampler compatibility with multimodal BatchFeature inputs, while the other only corrects documentation typos. They touch different code paths and should stay separate."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:42797|pull_request:43666"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 5020, "estimated_input_tokens": 2382, "item_count": 3, "node_count": 3, "serialized_chars": 9528, "soft_pair_count": 2}, "cached_at": "2026-04-14T13:51:06Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "05043e70f8e65ffce4aea0dc24ba08df1c44ac566ae66ac93207196a0d6d2ace", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:42975", "pull_request:42976", "pull_request:45180"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 45180 is the strongest cluster anchor: it is merged, most comprehensive, and the only PR with an explicit tracking issue, so it best represents the final state of this workflow-actions maintenance line.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 45180 is the best canonical representative because it is merged, has an explicit issue link, and implements the broader later workflow hardening across many of the same files by pinning actions to SHAs.", "confidence": 0.91, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:42975", "reason": "Related workflow-maintenance theme, but not the same concrete change: 42975 updates specific 
actions to newer versions for Node 24 compatibility, while 45180 later pins actions to commit SHAs for security hardening. Same files overlap, but the code change and goal are different.", "right": "pull_request:45180"}, {"accept": false, "left": "pull_request:42975", "reason": "Both are GitHub Actions version bumps, but they target different actions and workflows: 42975 updates checkout/cache/upload-artifact, while 42976 updates docker/build-push-action and a different set of workflow files. They could be part of the same maintenance theme, but not one duplicate fix.", "right": "pull_request:42976"}], "summary": "These are related GitHub Actions maintenance PRs, but they are not true duplicates: two earlier PRs bump different action families, while the merged PR later pins actions to commit SHAs as a broader hardening follow-up."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:42975|pull_request:45180", "pull_request:42975|pull_request:42976"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2074, "estimated_input_tokens": 909, "item_count": 2, "node_count": 2, "serialized_chars": 3634, "soft_pair_count": 1}, "cached_at": "2026-04-14T13:51:14Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "031938d55fa69971db1421350f2a467da000a957ccc2e302b0f33c2cb9a677bc", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43211", "pull_request:43463"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 43463 best represents the substantive fix for the shared issue target. 
PR 43211 is a supportive test-flake workaround, not the primary bug fix.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 43463 is the better canonical PR because it changes the model code path and addresses the underlying SAM-HQ weight-loading bug; PR 43211 is test-only stabilization and does not fix the core issue.", "confidence": 0.95, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43211", "reason": "Reject: the PRs do not fix the same concrete bug or change. 43211 seeds integration tests to avoid flaky randomness, while 43463 alters SamHQModel tied-weights behavior for checkpoint loading. Same issue target alone is insufficient.", "right": "pull_request:43463"}], "summary": "Both PRs reference the same tracking issue, but they fix different things: one stabilizes flaky SAM-HQ integration tests by seeding randomness, while the other fixes the SAM-HQ weight-loading implementation by declaring tied weights. They should not be clustered as duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43211|pull_request:43463"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2634, "estimated_input_tokens": 1189, "item_count": 2, "node_count": 2, "serialized_chars": 4754, "soft_pair_count": 1}, "cached_at": "2026-04-14T13:51:23Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "06123203d683fb52b2bad6a1e8902a0f5b6b6d08c07b841a33de378587d8ef88", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43241", "pull_request:44304"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "pull_request:44304 best represents the 
cluster because it is a focused docs PR with explicit new integration pages, whereas pull_request:43241 is a broader/more generic docs update.", "canonical_issue_reason": null, "canonical_pr_reason": "pull_request:44304 is the more specific and self-contained docs addition, with a clear title and two new integration pages focused on one integration family.", "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43241", "reason": "Both edit docs/_toctree.yml, but they add different community integrations and different pages. They do not look like the same concrete change and could not plausibly be merged as one PR.", "right": "pull_request:44304"}], "summary": "Two documentation pull requests in the same docs area, but they add different community integration pages: one for Candle/ExecuTorch/MLX and one for NeMo Automodel pretraining/finetuning. Shared _toctree edits are too broad to indicate the same underlying change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43241|pull_request:44304"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2452, "estimated_input_tokens": 1098, "item_count": 2, "node_count": 2, "serialized_chars": 4390, "soft_pair_count": 1}, "cached_at": "2026-04-14T13:52:09Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "2d93499c83c40af0e08a332dc41c5b8361cd145aec1d6cfcfd5fc7e0a58b16e3", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43273", "pull_request:43660"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #43660 is the strongest representative of the fix because it contains 
the concrete behavioral change and verification. PR #43273 is narrower and less complete.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #43660 is the better canonical PR: it fixes the actual code path in `apply_chat_template` by populating `sampling_rate` from `feature_extractor` when absent, and it includes tests. PR #43273 mainly adjusts the kwarg typing/defaults and appears to be an earlier partial step.", "confidence": 0.88, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:43273", "reason": "Same underlying bug in `apply_chat_template` audio sampling-rate handling for the same tracked issue; #43660 completes the fix that #43273 started.", "right": "pull_request:43660"}], "summary": "Both pull requests address the same audio-chat-template sampling rate bug for issue #43262, but PR #43660 is the more complete fix because it implements the runtime fallback from `feature_extractor.sampling_rate` and adds tests. PR #43273 looks like an earlier partial/adjacent change in the same area."}, "error_kind": null, "error_message": null, "evaluator_result": {"accept": true, "feedback": "Accepted: the summary is grounded in the packet. 
Both PRs target issue 43262 in the same file, and PR #43660 clearly adds the runtime fallback plus tests, making it a reasonable more-complete representative of the fix."}, "evaluator_used": true, "retried": false}, "soft_pairs": ["pull_request:43273|pull_request:43660"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 3990, "estimated_input_tokens": 1867, "item_count": 3, "node_count": 3, "serialized_chars": 7465, "soft_pair_count": 3}, "cached_at": "2026-04-14T13:52:21Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "4eb18ecf669eff529049c9b28a65e0c6725aca86e03e328022d0de106eae98ae", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43336", "pull_request:43658", "pull_request:43841"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #43841 is the best standalone fix candidate because it states the bug precisely and contains the most explicit implementation note for disabling sparse-layer creation. It is the cleanest representation of the underlying change.", "canonical_issue_reason": null, "canonical_pr_reason": "Use PR #43841 as the canonical representative for the sparse-layer bug fix: it targets the same code path as #43658, has the clearest title, and its diff explicitly explains the zero-step behavior. 
#43336 is unrelated documentation/comment cleanup.", "confidence": 0.97, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:43658", "reason": "Same underlying bug in `SwitchTransformersConfig`: both change the zero-sparse-layer behavior in `configuration_switch_transformers.py` and are clearly duplicate fixes for the same concrete code path.", "right": "pull_request:43841"}, {"accept": false, "left": "pull_request:43336", "reason": "#43336 is a docs/comment update spanning unrelated files (`audio_utils.py` and docstrings) and does not fix the sparse-layer creation bug.", "right": "pull_request:43841"}, {"accept": false, "left": "pull_request:43336", "reason": "#43336 only edits documentation/comments and `audio_utils.py`, while #43658 changes the sparse-layer initialization logic; they are not the same bug/change.", "right": "pull_request:43658"}], "summary": "Two PRs are the same concrete SwitchTransformers bug fix for disabling sparse layers when `num_sparse_*_layers=0`; the third PR is a separate docs/comment cleanup and should not be merged with them."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43658|pull_request:43841", "pull_request:43336|pull_request:43841", "pull_request:43336|pull_request:43658"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2258, "estimated_input_tokens": 1001, "item_count": 2, "node_count": 2, "serialized_chars": 4002, "soft_pair_count": 1}, "cached_at": "2026-04-14T13:52:43Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "c825018f2513c17e926d98ad4f31252cb4f60b246c60c094481473a196a6d166", "prepared_review_unit_schema_version": "1.0"}, 
"nodes": ["pull_request:43354", "pull_request:43467"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "pull_request:43467 is the best representative of the cluster since it attempts to correct the test setup itself and is more complete than the skip-only workaround.", "canonical_issue_reason": null, "canonical_pr_reason": "pull_request:43467 is the stronger canonical PR because it makes an actual code-path change to the integration tests rather than only skipping them.", "confidence": 0.84, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43354", "reason": "Both reference the same issue and file, but they solve the test failure differently: 43354 adds a skip, while 43467 changes the instantiated model class. These are related remediation paths, not the same underlying PR change.", "right": "pull_request:43467"}], "summary": "Two closed PRs target the same GLM image test file and the same issue, but they implement different remedies: one skips the problematic integration tests, while the other rewrites test instantiation to use a different model class. 
They are related, but not duplicates of the same concrete code change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43354|pull_request:43467"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 3652, "estimated_input_tokens": 1698, "item_count": 2, "node_count": 2, "serialized_chars": 6789, "soft_pair_count": 1}, "cached_at": "2026-04-14T13:53:14Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "2399a7191d6f083aa7072d43092e8c3f7d5f86dbdabe50e66997e0cc602068ce", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43357", "pull_request:44171"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #44171 is the best representative for the cluster because it appears to subsume the earlier Parakeet TDT support and adds the most complete surface area for the feature.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #44171 is the broader and more recent Parakeet TDT implementation, covering the core model plus AutoModelForTDT and related integration points.", "confidence": 0.78, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:43357", "reason": "Both PRs target the same underlying Parakeet TDT feature and share many core Parakeet files; #44171 looks like a broader continuation of #43357 rather than a separate change.", "right": "pull_request:44171"}], "summary": "Two PRs add Parakeet TDT support; the second is a broader follow-up with the same core Parakeet TDT implementation plus additional integration/docs work."}, "error_kind": null, "error_message": null, "evaluator_result": {"accept": true, "feedback": "The 
summary is grounded in the packet: both PRs focus on Parakeet TDT support, share many Parakeet files, and #44171 clearly extends the earlier work with additional AutoModel/docs/processing integration. The soft-edge accept is conservative enough here."}, "evaluator_used": true, "retried": false}, "soft_pairs": ["pull_request:43357|pull_request:44171"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 3392, "estimated_input_tokens": 1568, "item_count": 2, "node_count": 2, "serialized_chars": 6272, "soft_pair_count": 1}, "cached_at": "2026-04-14T13:53:21Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "2f995724d839e47e52e43b3f17fb8219d3eebe12bf70491b0c05caaeb0825464", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43396", "pull_request:44167"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #44167 is the best representative because it was merged, has the same scope and files as #43396, and thus reflects the completed canonical change.", "canonical_issue_reason": null, "canonical_pr_reason": "Choose PR #44167 because it is the merged version of the same `chore(typing): initial ty integration` change and appears to be the final accepted implementation.", "confidence": 0.99, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:43396", "reason": "Same PR title, same touched files, and matching diff content for the same `ty` integration change; the merged PR #44167 is effectively the final version.", "right": "pull_request:44167"}], "summary": "The two pull requests are effectively the same change: identical title, nearly identical diff, and the same set of touched files implementing initial `ty` integration. 
The merged PR is the better canonical representative."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43396|pull_request:44167"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2398, "estimated_input_tokens": 1071, "item_count": 2, "node_count": 2, "serialized_chars": 4283, "soft_pair_count": 1}, "cached_at": "2026-04-14T13:53:29Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "688aca09d491d394a4992a8c53df1a794e5d0780dfe1a3e7c8c09018ebcf83f5", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43471", "pull_request:43640"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43471", "reason": "Different underlying problems and different fixes. PR 43471 changes `src/transformers/cli/serve.py` to report missing serving dependencies; PR 43640 changes `modeling_utils.py` to detect a PEFT adapter when `peft` is absent. They do not target the same code path and would not be merged into one PR.", "right": "pull_request:43640"}], "summary": "The two PRs are both user-facing error-message improvements, but they fix different code paths: serving CLI missing dependencies vs. PEFT adapter loading without `peft` installed. 
They should not be merged as duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43471|pull_request:43640"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2162, "estimated_input_tokens": 953, "item_count": 2, "node_count": 2, "serialized_chars": 3812, "soft_pair_count": 1}, "cached_at": "2026-04-14T13:53:36Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "f0fe9ebb7b031a05d41f9cc23b9440fcd526d317b78df91efc3a35f5c61a9982", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43473", "pull_request:45031"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 45031 is the best representative because it has a precise, self-contained change to tied weights for CamembertForCausalLM and a clear model-specific correction.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 45031 is the cleaner canonical PR for the cluster only by being a narrowly scoped, explicit fix to a concrete model bug; PR 43473 is a separate model-loading fix in a different subsystem.", "confidence": 0.98, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43473", "reason": "Different models and failure modes: Apertus activation init/meta-tensor handling vs. Camembert tied-weights mapping. No shared code path or mergeable single fix.", "right": "pull_request:45031"}], "summary": "These are two unrelated merged PRs: one fixes Apertus initialization to avoid meta-tensor loading errors, and the other corrects Camembert CausalLM tied-weight metadata. 
They do not share the same underlying bug or change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43473|pull_request:45031"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2586, "estimated_input_tokens": 1165, "item_count": 2, "node_count": 2, "serialized_chars": 4660, "soft_pair_count": 1}, "cached_at": "2026-04-14T13:53:43Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "fe2c467b20466f1b39377087f534311dd7e48901c0998e484f64ccd335886039", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43501", "pull_request:44475"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #44475 is the best representative artifact: it is merged, narrowly scoped, and has an explicit, concrete fix. PR #43501 is broader and unrelated to the tokenizer-class registration change.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #44475 is the better canonical PR for this cluster because it is a small, merged, targeted fix with a clear code-path change in `tokenization_auto.py`. PR #43501 is a separate, unmerged feature addition in a different model area.", "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43501", "reason": "Different changes in different model areas: Qwen3 TTS encoder addition vs Chameleon tokenizer-class workaround. No shared bug or mergeable single fix path.", "right": "pull_request:44475"}], "summary": "The two pull requests are unrelated: one adds a Qwen3 TTS tokenizer encoder, while the other updates auto-tokenizer handling for Chameleon. 
They do not fix the same concrete code path and should not be clustered as duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43501|pull_request:44475"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 5298, "estimated_input_tokens": 2521, "item_count": 6, "node_count": 6, "serialized_chars": 10084, "soft_pair_count": 5}, "cached_at": "2026-04-14T13:54:22Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "2e13c89441fff4020cafc4799cf2a3d216ca514dc9f4435f823bf83e5326ac95", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43515", "pull_request:43516", "pull_request:43753", "pull_request:43892", "pull_request:44948", "pull_request:45022"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "#43516 is the strongest anchor for the cluster because it is the superset of the only plausible duplicate PR and is the most complete representation of that change; the remaining PRs are not duplicates of it or of each other.", "canonical_issue_reason": null, "canonical_pr_reason": "No issue artifacts are present. 
Among the PRs, #43516 is the best canonical representative because it subsumes #43515\u2019s same branch-creation content and adds one extra file, making it the more complete version of the only near-duplicate pair.", "confidence": 0.93, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:43515", "reason": "Same underlying branch-creation change: both add the same .vscode/settings.json, hello.py, and submodule entry; #43516 just adds one extra file, so they are plausibly the same PR content.", "right": "pull_request:43516"}, {"accept": false, "left": "pull_request:43516", "reason": "#43516 is a branch-creation/test-files PR, while #43892 updates KERNELS_MIN_VERSION in import_utils.py; different code paths and unrelated changes.", "right": "pull_request:43892"}, {"accept": false, "left": "pull_request:43753", "reason": "#43753 changes a runtime version constant in import_utils.py, but #45022 only edits docs/source/_config.py; no shared bug or concrete change.", "right": "pull_request:45022"}, {"accept": false, "left": "pull_request:43892", "reason": "#43892 is a metadata/version update in utils/update_metadata.py, while #45022 changes docs config; they do not fix the same issue or code path.", "right": "pull_request:45022"}, {"accept": false, "left": "pull_request:44948", "reason": "#44948 creates aa.py with placeholder content, whereas #45022 edits docs/source/_config.py; unrelated files and purposes.", "right": "pull_request:45022"}], "summary": "Only #43515 and #43516 look like a plausible duplicate pair; both are branch-creation style PRs with the same core files, while the other PRs are unrelated changes in different areas."}, "error_kind": null, "error_message": null, "evaluator_result": {"accept": true, "feedback": "The summary is well grounded in the packet. The only plausible duplicate pair is #43515/#43516: they share the same core files and #43516 is a strict superset. 
The other pair judgments are conservative and supported by the diffs, and there is no overstatement about issue linkage since no issue artifacts are present."}, "evaluator_used": true, "retried": false}, "soft_pairs": ["pull_request:43515|pull_request:43516", "pull_request:43516|pull_request:43892", "pull_request:43753|pull_request:45022", "pull_request:43892|pull_request:45022", "pull_request:44948|pull_request:45022"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2572, "estimated_input_tokens": 1158, "item_count": 2, "node_count": 2, "serialized_chars": 4629, "soft_pair_count": 1}, "cached_at": "2026-04-14T13:54:35Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "4376d7f2bdcac39c983443e98e26324739bc951c11992b2479dc65877bba1964", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43517", "pull_request:43574"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "Neither PR is a good global representative of the cluster; the items do not describe the same underlying change.", "canonical_issue_reason": null, "canonical_pr_reason": "No single canonical PR fits the cluster because the PRs are unrelated changes in different subsystems.", "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43517", "reason": "Different bug fixes in different areas: Trainer sequence-parallel evaluation vs. initialization behavior for SigLIP under ZeRO3. They would not plausibly merge into one PR.", "right": "pull_request:43574"}], "summary": "These two pull requests are not duplicates: one fixes Trainer evaluation with sequence parallel, while the other changes weight initialization for SigLIP/ZeRO3. 
They touch different code paths and solve different problems."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43517|pull_request:43574"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2460, "estimated_input_tokens": 1102, "item_count": 2, "node_count": 2, "serialized_chars": 4405, "soft_pair_count": 1}, "cached_at": "2026-04-14T13:54:42Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "c3e7b2d4d307f4cc53a677db19462152336348e10a169d5b997ed4a910efdbec", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43545", "pull_request:43547"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 43547 is the best representative of the cluster since it addresses the concrete code-path bug and would be the main fix users care about.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 43547 is the stronger canonical PR because it fixes the underlying Fbgemm FP8 behavior in integration/quantizer code, not just the tests.", "confidence": 0.95, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43545", "reason": "Do not merge: 43545 only adjusts/removes test coverage and skips, while 43547 changes FP8 runtime logic to update input_scale_ub after weight loading. 
Same subsystem, but different underlying changes and not one combined fix.", "right": "pull_request:43547"}], "summary": "Two merged PRs touch the same Fbgemm FP8 test area, but they fix different problems: one is a test-only cleanup/removal, the other is a real runtime quantization bug fix in the FP8 code path."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43545|pull_request:43547"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 3830, "estimated_input_tokens": 1787, "item_count": 3, "node_count": 3, "serialized_chars": 7145, "soft_pair_count": 2}, "cached_at": "2026-04-14T13:54:51Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "7420945167630707afa42e8f1cf534507bed800453a62c1cb5ef36c17afcd20b", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43561", "pull_request:43754", "pull_request:45321"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 45321 is the strongest standalone candidate: it has a focused torchao integration change and a matching test update. 
PR 43561 is broader/docs-oriented, and PR 43754 is unrelated SDPA cleanup in other model files.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 45321 is the best representative of the current cluster because it is the only open PR and it targets the same torchao integration area as one of the other items, but it is still a distinct change rather than the same fix.", "confidence": 0.92, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43561", "reason": "Both touch torchao integration/tests, but 43561 removes autoquant support while 45321 changes torchao tensor repr handling; they are different features/code paths, not one fix.", "right": "pull_request:45321"}, {"accept": false, "left": "pull_request:43561", "reason": "These address different problems: 43561 removes torchao.autoquant references, while 43754 removes SDPA workarounds for newer torch versions across several model implementations.", "right": "pull_request:43754"}], "summary": "These PRs are related only by the torchao/quantization area, but they change different code paths: one removes autoquant support, one removes SDPA workarounds, and one simplifies torchao tensor repr handling. 
I would not merge any of the soft pairs as duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43561|pull_request:45321", "pull_request:43561|pull_request:43754"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 21646, "estimated_input_tokens": 10695, "item_count": 18, "node_count": 18, "serialized_chars": 42779, "soft_pair_count": 24}, "cached_at": "2026-04-14T13:55:38Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "e9f60359e19cc9223d9114cc9d59f6447c0f1d7bddf76d527d9f304a98fc422a", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43983", "pull_request:43996", "pull_request:44007", "pull_request:44011", "pull_request:44013", "pull_request:44020", "pull_request:44022", "pull_request:44023", "pull_request:44039", "pull_request:44044", "pull_request:44066", "pull_request:44072", "pull_request:44085", "pull_request:44089", "pull_request:44091", "pull_request:44093", "pull_request:44129", "pull_request:44722"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "43983 is the strongest cluster anchor: merged, explicit target, and broad enough to represent the initial implementation wave for standardized output tracing.", "canonical_issue_reason": null, "canonical_pr_reason": "43983 is the earliest merged PR in the series and appears to establish the standardized output-capture pattern across multiple models, so it is the best representative of the broader refactor family.", "confidence": 0.83, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44011", "reason": "Different model families and different concrete code paths; 
both are related to the same tracking issue, but they are not the same change.", "right": "pull_request:44020"}, {"accept": false, "left": "pull_request:44011", "reason": "Swin vs DeBERTa are separate refactors in different files; same theme, not the same underlying bug/change.", "right": "pull_request:44093"}, {"accept": false, "left": "pull_request:44020", "reason": "DeBERTa and Swin are distinct model implementations; this is a shared refactor pattern, not a duplicate PR.", "right": "pull_request:44091"}, {"accept": false, "left": "pull_request:44091", "reason": "Both are standardized output-tracing refactors, but for different models/files, so they should stay separate.", "right": "pull_request:44093"}, {"accept": false, "left": "pull_request:44011", "reason": "Swin and Nystromformer are unrelated model implementations; same issue target is not enough to merge them.", "right": "pull_request:44023"}, {"accept": false, "left": "pull_request:44023", "reason": "Different models and separate code paths; these are sibling refactors, not duplicates.", "right": "pull_request:44091"}, {"accept": false, "left": "pull_request:44022", "reason": "ConvBERT and Swin are separate model-specific changes, not one shared fix.", "right": "pull_request:44091"}, {"accept": false, "left": "pull_request:44011", "reason": "Swin output tracing and GPT-J/CodeGen output tracing are different concrete refactors.", "right": "pull_request:44722"}, {"accept": false, "left": "pull_request:44091", "reason": "Swin and GPT-J/CodeGen are unrelated implementations; do not merge across model families.", "right": "pull_request:44722"}, {"accept": true, "left": "pull_request:44085", "reason": "These are both GPT-J output-tracing refactors on the same core file/code path; 44722 looks like a broader follow-up that includes the same GPT-J change.", "right": "pull_request:44722"}, {"accept": false, "left": "pull_request:44020", "reason": "DeBERTa and GPT-J are different code paths and different 
model-specific refactors.", "right": "pull_request:44085"}, {"accept": false, "left": "pull_request:44085", "reason": "GPT-J and DeBERTa are separate model implementations, so this is not a duplicate change.", "right": "pull_request:44093"}, {"accept": false, "left": "pull_request:44089", "reason": "T5/SqueezeBERT versus DeBERTa are unrelated model-specific refactors.", "right": "pull_request:44093"}, {"accept": false, "left": "pull_request:44085", "reason": "GPT-J and T5/SqueezeBERT do not share the same concrete change.", "right": "pull_request:44089"}, {"accept": false, "left": "pull_request:44020", "reason": "DeBERTa and SpeechT5 are distinct model refactors; same issue target only indicates a common project theme.", "right": "pull_request:44129"}, {"accept": false, "left": "pull_request:43996", "reason": "FNet/CVT and GPT-J are different implementations, so these are not duplicates.", "right": "pull_request:44085"}, {"accept": false, "left": "pull_request:43996", "reason": "FNet/CVT and DeBERTa v2 are separate model-specific refactors.", "right": "pull_request:44044"}, {"accept": false, "left": "pull_request:44007", "reason": "RegNet/ResNet and EfficientNet are different model paths; same issue target is insufficient for duplicate triage.", "right": "pull_request:44072"}, {"accept": false, "left": "pull_request:44011", "reason": "Swin and GPT-J are separate model refactors, not one underlying change.", "right": "pull_request:44066"}, {"accept": false, "left": "pull_request:43996", "reason": "FNet/CVT and Swin are different model implementations and should remain separate.", "right": "pull_request:44091"}, {"accept": false, "left": "pull_request:44089", "reason": "SqueezeBERT/T5 versus Swin are unrelated changes.", "right": "pull_request:44091"}, {"accept": false, "left": "pull_request:44039", "reason": "AI-generated fallback patch and GPT-J refactor do not appear to be the same concrete code change.", "right": "pull_request:44085"}, {"accept": false, "left": 
"pull_request:43983", "reason": "DecisionTransformer/GPT2 and DeBERTa v2 are different model-specific refactors.", "right": "pull_request:44044"}, {"accept": false, "left": "pull_request:44013", "reason": "MobileNetV2 and DeBERTa v2 are separate implementations; no duplicate evidence beyond the shared issue target.", "right": "pull_request:44044"}], "summary": "This cluster is mostly a series of related model-by-model refactors for standardized output tracing/capture, not one single duplicate change. A few pairs look like true re-runs or rebased copies of the same model-specific PR, especially within Swin, DeBERTa, and GPT-J."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44011|pull_request:44020", "pull_request:44011|pull_request:44093", "pull_request:44020|pull_request:44091", "pull_request:44091|pull_request:44093", "pull_request:44011|pull_request:44023", "pull_request:44023|pull_request:44091", "pull_request:44022|pull_request:44091", "pull_request:44011|pull_request:44722", "pull_request:44091|pull_request:44722", "pull_request:44085|pull_request:44722", "pull_request:44020|pull_request:44085", "pull_request:44085|pull_request:44093", "pull_request:44089|pull_request:44093", "pull_request:44085|pull_request:44089", "pull_request:44020|pull_request:44129", "pull_request:43996|pull_request:44085", "pull_request:43996|pull_request:44044", "pull_request:44007|pull_request:44072", "pull_request:44011|pull_request:44066", "pull_request:43996|pull_request:44091", "pull_request:44089|pull_request:44091", "pull_request:44039|pull_request:44085", "pull_request:43983|pull_request:44044", "pull_request:44013|pull_request:44044"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 20002, "estimated_input_tokens": 9873, "item_count": 18, "node_count": 18, "serialized_chars": 39489, "soft_pair_count": 16}, "cached_at": 
"2026-04-14T13:56:27Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "f835c2cb27429e3caabf879061d0133630abe86bd9d9496e52a28f6084702097", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44011", "pull_request:44018", "pull_request:44019", "pull_request:44059", "pull_request:44066", "pull_request:44068", "pull_request:44071", "pull_request:44085", "pull_request:44086", "pull_request:44088", "pull_request:44091", "pull_request:44102", "pull_request:44104", "pull_request:44105", "pull_request:44139", "pull_request:44140", "pull_request:44141", "pull_request:44335"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "44091 is the best overall representative: it is a completed, issue-linked Swin refactor that clearly implements the standardized output-tracing change and includes test coverage.", "canonical_issue_reason": null, "canonical_pr_reason": "No single PR cleanly canonicalizes the whole cluster because it spans multiple models; among them, 44091 is the strongest representative since it is closed, issue-linked, and shows the full output-tracing refactor with tests.", "confidence": 0.74, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:44059", "reason": "Same model (GPT-2), same file, and essentially the same output-tracing refactor; this looks like duplicate branch work on the same concrete change.", "right": "pull_request:44088"}, {"accept": false, "left": "pull_request:44066", "reason": "Different models and different code paths; both are output-tracing refactors, but not the same underlying change.", "right": "pull_request:44086"}, {"accept": false, "left": "pull_request:44066", "reason": "GPT-J vs GPT-2: same refactor theme, but not the same model or concrete 
bug/change.", "right": "pull_request:44088"}, {"accept": true, "left": "pull_request:44139", "reason": "Same model (LiLT) and same output-tracing migration pattern; likely duplicate work on the same implementation change.", "right": "pull_request:44141"}, {"accept": true, "left": "pull_request:44018", "reason": "Same model (GPT-Neo), same file, and same standardized output-tracing refactor; strong duplicate signal.", "right": "pull_request:44068"}, {"accept": false, "left": "pull_request:44066", "reason": "GPT-J and MPT are unrelated implementations; similarity is only at the refactor pattern level.", "right": "pull_request:44071"}, {"accept": false, "left": "pull_request:44105", "reason": "Different models (LiLT vs IBERT); not the same concrete code-path change.", "right": "pull_request:44141"}, {"accept": false, "left": "pull_request:44140", "reason": "Megatron-BERT and IBERT are different model implementations; shared decorator migration is too broad to call a duplicate.", "right": "pull_request:44141"}, {"accept": false, "left": "pull_request:44102", "reason": "IBERT and LiLT are distinct models; both touch output tracing, but not the same bug or change.", "right": "pull_request:44105"}, {"accept": false, "left": "pull_request:44011", "reason": "Swin vs ResNet: same tooling idea, but different models and different implementation paths.", "right": "pull_request:44019"}, {"accept": false, "left": "pull_request:44019", "reason": "ResNet and Swin are unrelated code paths; this is a broad refactor similarity, not a duplicate fix.", "right": "pull_request:44091"}, {"accept": false, "left": "pull_request:44102", "reason": "IBERT vs LiLT; same migration style, but not the same underlying change.", "right": "pull_request:44139"}, {"accept": true, "left": "pull_request:44104", "reason": "Same model family (Megatron-BERT) and same output-tracing refactor direction; looks like the same change repeated in another PR.", "right": "pull_request:44141"}, {"accept": false, 
"left": "pull_request:44011", "reason": "Swin and IBERT are different models; only the refactor pattern matches.", "right": "pull_request:44102"}, {"accept": false, "left": "pull_request:44091", "reason": "Swin vs IBERT: not the same concrete bug/change, just the same decorator migration theme.", "right": "pull_request:44102"}, {"accept": false, "left": "pull_request:44085", "reason": "GPT-J vs RoFormer are different models; the similarity is too generic to merge as duplicates.", "right": "pull_request:44335"}], "summary": "These PRs are mostly separate model-by-model migrations to the same output-tracing pattern, not one shared bug. I only treat same-model, same-file refactor pairs as duplicates; cross-model similarity is just project-wide refactoring."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44059|pull_request:44088", "pull_request:44066|pull_request:44086", "pull_request:44066|pull_request:44088", "pull_request:44139|pull_request:44141", "pull_request:44018|pull_request:44068", "pull_request:44066|pull_request:44071", "pull_request:44105|pull_request:44141", "pull_request:44140|pull_request:44141", "pull_request:44102|pull_request:44105", "pull_request:44011|pull_request:44019", "pull_request:44019|pull_request:44091", "pull_request:44102|pull_request:44139", "pull_request:44104|pull_request:44141", "pull_request:44011|pull_request:44102", "pull_request:44091|pull_request:44102", "pull_request:44085|pull_request:44335"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 20596, "estimated_input_tokens": 10170, "item_count": 18, "node_count": 18, "serialized_chars": 40679, "soft_pair_count": 19}, "cached_at": "2026-04-14T13:57:24Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": 
"0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "1624a7a2f68c50722edf22778880fd48549b9912674cbc327e30e12f6b28d551", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43590", "pull_request:44011", "pull_request:44018", "pull_request:44019", "pull_request:44020", "pull_request:44046", "pull_request:44065", "pull_request:44066", "pull_request:44068", "pull_request:44084", "pull_request:44086", "pull_request:44087", "pull_request:44088", "pull_request:44091", "pull_request:44093", "pull_request:44094", "pull_request:44335", "pull_request:44722"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #43590 is the strongest representative overall: merged, high activity, and the widest scope across many models.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #43590 is the broad merged umbrella for the output-tracing cleanup effort and is the best single representative of this cluster.", "confidence": 0.93, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44019", "reason": "Different models and code paths (Swin vs CTRL); same refactor style, but not the same change.", "right": "pull_request:44065"}, {"accept": false, "left": "pull_request:44019", "reason": "Different models (Swin vs DeBERTa); similar cleanup pattern only.", "right": "pull_request:44020"}, {"accept": false, "left": "pull_request:44019", "reason": "Different models and files; no concrete overlap in the bug/change.", "right": "pull_request:44093"}, {"accept": false, "left": "pull_request:43590", "reason": "#43590 is a broad multi-model sweep; #44066 is a later model-specific follow-up, not the same PR-level change.", "right": "pull_request:44066"}, {"accept": false, "left": "pull_request:44086", "reason": "Different models (MGP-STR vs GPT-J/CodeGen); no shared code path.", "right": "pull_request:44722"}, {"accept": false, "left": 
"pull_request:44088", "reason": "Different models (GPT-2 vs RoFormer); same refactor theme only.", "right": "pull_request:44335"}, {"accept": false, "left": "pull_request:44088", "reason": "Different model families (GPT-2 vs GPT-J/CodeGen); not the same underlying change.", "right": "pull_request:44722"}, {"accept": false, "left": "pull_request:44065", "reason": "CTRL and GPT-2 are separate model implementations; no duplicate code path.", "right": "pull_request:44088"}, {"accept": false, "left": "pull_request:44084", "reason": "GPT-J and MGP-STR are unrelated model files; only the decorator pattern matches.", "right": "pull_request:44086"}, {"accept": false, "left": "pull_request:44068", "reason": "Different models (GPT-Neo vs GPT-2); similar output-tracing refactor only.", "right": "pull_request:44088"}, {"accept": false, "left": "pull_request:44065", "reason": "CTRL and Mamba2 touch different model code paths.", "right": "pull_request:44087"}, {"accept": false, "left": "pull_request:44084", "reason": "Different models (GPT-J vs GPT-2); not mergeable as one concrete fix.", "right": "pull_request:44088"}, {"accept": true, "left": "pull_request:44046", "reason": "Both change CodeGen output tracing in modeling_codegen.py; #44066 bundles the same CodeGen change with extra GPT-J work.", "right": "pull_request:44066"}, {"accept": true, "left": "pull_request:44066", "reason": "Both are CodeGen output-tracing refactors on the same model/file, so they look like the same underlying change.", "right": "pull_request:44094"}, {"accept": false, "left": "pull_request:44018", "reason": "GPT-Neo and DeBERTa are different models; no shared concrete bug/change.", "right": "pull_request:44020"}, {"accept": false, "left": "pull_request:44018", "reason": "Different model implementations (GPT-Neo vs DeBERTa); same general cleanup only.", "right": "pull_request:44093"}, {"accept": false, "left": "pull_request:44087", "reason": "Mamba2 and GPT-J/CodeGen are unrelated model code paths.", 
"right": "pull_request:44722"}, {"accept": false, "left": "pull_request:44011", "reason": "Swin and GPT-Neo are different models; no duplicate fix.", "right": "pull_request:44018"}, {"accept": false, "left": "pull_request:44018", "reason": "GPT-Neo and Swin are unrelated model files; same refactor family only.", "right": "pull_request:44091"}], "summary": "This cluster is a family of model-specific output-tracing refactors. The true duplicate-like pairs are the same-model same-file variants (Swin, GPT-Neo, DeBERTa, CodeGen, GPT-J); the rest are just similar refactors in different models and should stay separate."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44019|pull_request:44065", "pull_request:44019|pull_request:44020", "pull_request:44019|pull_request:44093", "pull_request:43590|pull_request:44066", "pull_request:44086|pull_request:44722", "pull_request:44088|pull_request:44335", "pull_request:44088|pull_request:44722", "pull_request:44065|pull_request:44088", "pull_request:44084|pull_request:44086", "pull_request:44068|pull_request:44088", "pull_request:44065|pull_request:44087", "pull_request:44084|pull_request:44088", "pull_request:44046|pull_request:44066", "pull_request:44066|pull_request:44094", "pull_request:44018|pull_request:44020", "pull_request:44018|pull_request:44093", "pull_request:44087|pull_request:44722", "pull_request:44011|pull_request:44018", "pull_request:44018|pull_request:44091"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 20388, "estimated_input_tokens": 10066, "item_count": 18, "node_count": 18, "serialized_chars": 40262, "soft_pair_count": 18}, "cached_at": "2026-04-14T13:57:57Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": 
"0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "db514e5422c86e0f43c779fb296beea9595df046d23f6206096443e93f46fa8d", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44011", "pull_request:44018", "pull_request:44020", "pull_request:44046", "pull_request:44047", "pull_request:44049", "pull_request:44065", "pull_request:44068", "pull_request:44071", "pull_request:44084", "pull_request:44085", "pull_request:44087", "pull_request:44091", "pull_request:44094", "pull_request:44098", "pull_request:44101", "pull_request:44116", "pull_request:44722"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #44011 is the strongest standalone representative of the cluster\u2019s shared pattern: a clean, self-contained output-tracing migration with direct issue linkage.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #44011 is a focused, representative Swin refactor with an explicit issue link (#43979) and clearer clustering signal than the other one-off model migrations.", "confidence": 0.76, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44049", "reason": "Different models and code paths (FNet vs GPT-J); same refactor theme is not enough to treat them as duplicates.", "right": "pull_request:44084"}, {"accept": false, "left": "pull_request:44018", "reason": "GPT-Neo vs Flaubert; unrelated model implementations despite both touching output tracing.", "right": "pull_request:44116"}, {"accept": false, "left": "pull_request:44068", "reason": "Different model families and files; not the same underlying bug or change.", "right": "pull_request:44116"}, {"accept": false, "left": "pull_request:44047", "reason": "Bloom and FNet are separate model refactors, so this is only broad topical similarity.", "right": "pull_request:44049"}, {"accept": false, "left": "pull_request:44065", "reason": "CTRL and MPT are different code 
paths; both are decorator migrations but not duplicates.", "right": "pull_request:44071"}, {"accept": false, "left": "pull_request:44065", "reason": "CTRL vs a mixed GPT-J/CodeGen PR; the overlap is only at the subsystem/theme level.", "right": "pull_request:44722"}, {"accept": false, "left": "pull_request:44071", "reason": "MPT output tracing refactor vs Mamba2 plus hub-kernel fix; clearly different changes.", "right": "pull_request:44087"}, {"accept": false, "left": "pull_request:44011", "reason": "Swin vs GPT-Neo; same migration pattern, different model code paths.", "right": "pull_request:44068"}, {"accept": false, "left": "pull_request:44068", "reason": "GPT-Neo vs Swin; unrelated model implementations.", "right": "pull_request:44091"}, {"accept": false, "left": "pull_request:44068", "reason": "GPT-Neo versus a broader GPT-J/CodeGen refactor; not the same concrete PR.", "right": "pull_request:44722"}, {"accept": false, "left": "pull_request:44049", "reason": "Different model targets; the metadata points to different code paths, so this is not a duplicate.", "right": "pull_request:44085"}, {"accept": false, "left": "pull_request:44011", "reason": "Swin and CTRL are distinct model-specific refactors.", "right": "pull_request:44065"}, {"accept": false, "left": "pull_request:44065", "reason": "CTRL vs Swin; same standardization theme, different implementations.", "right": "pull_request:44091"}, {"accept": false, "left": "pull_request:44046", "reason": "CodeGen vs GPT-J; similar decorator migration, but not the same change.", "right": "pull_request:44084"}, {"accept": false, "left": "pull_request:44084", "reason": "GPT-J vs CodeGen; these are separate model refactors rather than one shared fix.", "right": "pull_request:44094"}, {"accept": false, "left": "pull_request:44020", "reason": "DeBERTa and CodeGen are unrelated model-specific migrations.", "right": "pull_request:44046"}, {"accept": false, "left": "pull_request:44020", "reason": "DeBERTa vs CodeGen; no 
shared concrete bug or patch.", "right": "pull_request:44094"}, {"accept": false, "left": "pull_request:44098", "reason": "ViLT vs Flaubert/XLM-related changes; only broad output-handling similarity.", "right": "pull_request:44101"}], "summary": "This cluster is a set of model-specific refactors toward standardized output tracing/decorator-based output handling. They share a common theme, but the soft edges mostly connect different models or mixed-scope PRs rather than the same concrete change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44049|pull_request:44084", "pull_request:44018|pull_request:44116", "pull_request:44068|pull_request:44116", "pull_request:44047|pull_request:44049", "pull_request:44065|pull_request:44071", "pull_request:44065|pull_request:44722", "pull_request:44071|pull_request:44087", "pull_request:44011|pull_request:44068", "pull_request:44068|pull_request:44091", "pull_request:44068|pull_request:44722", "pull_request:44049|pull_request:44085", "pull_request:44011|pull_request:44065", "pull_request:44065|pull_request:44091", "pull_request:44046|pull_request:44084", "pull_request:44084|pull_request:44094", "pull_request:44020|pull_request:44046", "pull_request:44020|pull_request:44094", "pull_request:44098|pull_request:44101"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 19848, "estimated_input_tokens": 9796, "item_count": 18, "node_count": 18, "serialized_chars": 39184, "soft_pair_count": 15}, "cached_at": "2026-04-14T13:58:25Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "939a345f04de52cea8197cf320e10ea3327ca75a387f6f9f71320c1ba04e0f2c", 
"prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44017", "pull_request:44018", "pull_request:44019", "pull_request:44022", "pull_request:44023", "pull_request:44049", "pull_request:44059", "pull_request:44068", "pull_request:44071", "pull_request:44073", "pull_request:44084", "pull_request:44085", "pull_request:44086", "pull_request:44087", "pull_request:44088", "pull_request:44161", "pull_request:44335", "pull_request:44722"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 44068 is the best overall representative because it is focused, explicitly issue-linked, and cleanly reflects the common standardized output tracing change without mixing in unrelated model changes.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 44068 is the clearest representative of the shared refactor wave: it directly implements the output-tracing migration, has the explicit #43979 link, and is a typical model-specific instance of the pattern.", "confidence": 0.95, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44335", "reason": "Both are output-tracing refactors, but for different model families (RoFormer vs GPT-J/CodeGen). They do not fix the same concrete code-path bug or belong in one PR.", "right": "pull_request:44722"}, {"accept": false, "left": "pull_request:44068", "reason": "GPT-Neo and MGP-STR are unrelated model implementations; the PRs share only the generic decorator migration pattern, not the same underlying change.", "right": "pull_request:44086"}, {"accept": false, "left": "pull_request:44023", "reason": "Nystromformer and MPT are separate models with different forward paths. Similar refactor style is not enough to treat them as duplicates.", "right": "pull_request:44071"}, {"accept": false, "left": "pull_request:44059", "reason": "GPT-2 and MGP-STR touch different code paths and model logic. 
These are independent refactors, not one mergeable change.", "right": "pull_request:44086"}, {"accept": false, "left": "pull_request:44059", "reason": "GPT-2 output tracing and GPT-J/CodeGen output tracing are distinct model-level migrations with different implementations and risks.", "right": "pull_request:44722"}, {"accept": false, "left": "pull_request:44084", "reason": "GPT-J and RoFormer are different architectures; these refactors do not address the same bug or exact behavior change.", "right": "pull_request:44335"}, {"accept": false, "left": "pull_request:44018", "reason": "GPT-Neo versus RoFormer is not the same code path. They are only similar in that both modernize output handling.", "right": "pull_request:44335"}, {"accept": false, "left": "pull_request:44023", "reason": "Nystromformer and MGP-STR are unrelated model implementations; no shared concrete fix is evident.", "right": "pull_request:44086"}, {"accept": false, "left": "pull_request:44022", "reason": "ConvBERT and MPT are different models with distinct tracing code. Same refactor theme, but not the same underlying change.", "right": "pull_request:44071"}, {"accept": false, "left": "pull_request:44017", "reason": "SegFormer and LongT5 are separate model stacks; these are parallel refactors, not duplicates of one bug or feature.", "right": "pull_request:44161"}, {"accept": false, "left": "pull_request:44017", "reason": "SegFormer and VisualBERT are different architectures and forward paths. The overlap is only the standardized output tracing approach.", "right": "pull_request:44073"}, {"accept": false, "left": "pull_request:44087", "reason": "Mamba2 kernel handling and GPT-2 output tracing are unrelated changes. 
They do not share a single concrete bug fix.", "right": "pull_request:44088"}, {"accept": false, "left": "pull_request:44022", "reason": "ConvBERT and MPT are separate model implementations; this is a shared refactoring trend, not one duplicate fix.", "right": "pull_request:44071"}, {"accept": false, "left": "pull_request:44019", "reason": "ResNet and FNet are different model families. The PRs do not target the same code path or behavior.", "right": "pull_request:44049"}, {"accept": false, "left": "pull_request:44071", "reason": "MPT and GPT-J are different model families with different output signatures and internals; similar decorator migration is not enough for a duplicate.", "right": "pull_request:44085"}], "summary": "This cluster is a set of model-specific refactors to adopt standardized output tracing decorators, but each PR targets a different architecture and code path. They are related by theme, not duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44335|pull_request:44722", "pull_request:44068|pull_request:44086", "pull_request:44023|pull_request:44071", "pull_request:44059|pull_request:44086", "pull_request:44059|pull_request:44722", "pull_request:44084|pull_request:44335", "pull_request:44018|pull_request:44335", "pull_request:44023|pull_request:44086", "pull_request:44022|pull_request:44071", "pull_request:44017|pull_request:44161", "pull_request:44017|pull_request:44073", "pull_request:44087|pull_request:44088", "pull_request:44022|pull_request:44086", "pull_request:44019|pull_request:44049", "pull_request:44071|pull_request:44085"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 19722, "estimated_input_tokens": 9733, "item_count": 18, "node_count": 18, "serialized_chars": 38932, "soft_pair_count": 14}, "cached_at": "2026-04-14T13:58:38Z", "key": {"analyst_prompt_version": "1.0", 
"cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "02c365baa6cb89b07b3b39b1926605cb9f57df49589733b250dc042fdf8ab037", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43997", "pull_request:44011", "pull_request:44012", "pull_request:44017", "pull_request:44020", "pull_request:44047", "pull_request:44056", "pull_request:44071", "pull_request:44073", "pull_request:44084", "pull_request:44086", "pull_request:44087", "pull_request:44091", "pull_request:44092", "pull_request:44101", "pull_request:44161", "pull_request:44335", "pull_request:44722"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 44084 is the best single exemplar for this cluster because it has a focused GPT-J code change, explicit issue linkage, and matches the overarching standardized output-tracing work without mixing in unrelated files.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 44084 is the clearest representative: it is self-contained, directly tied to the output-tracing migration, and explicitly linked to the shared tracking issue while making a concrete model-level refactor.", "confidence": 0.91, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44011", "reason": "Both are output-tracing refactors, but they target different models (Swin vs MGP-STR) and different concrete code paths; not the same bug/change.", "right": "pull_request:44086"}, {"accept": false, "left": "pull_request:44017", "reason": "Different models and different refactor scope (SegFormer vs GPT-J/CodeGen); same theme only, not a mergeable duplicate.", "right": "pull_request:44722"}, {"accept": false, "left": "pull_request:44047", "reason": "Bloom and MPT are separate implementations with distinct forward paths; these are parallel refactors, not the same 
underlying change.", "right": "pull_request:44071"}, {"accept": false, "left": "pull_request:44020", "reason": "DeBERTa output-tracing cleanup and Mamba2 tracing/refactor are unrelated model code paths, so they should not be merged as duplicates.", "right": "pull_request:44087"}, {"accept": false, "left": "pull_request:43997", "reason": "RegNet and VisualBert are unrelated architectures; shared decorator migration is too broad to count as the same change.", "right": "pull_request:44073"}, {"accept": false, "left": "pull_request:44073", "reason": "VisualBert and XLM/Flaubert are separate model families with different attention/output code; not the same concrete fix.", "right": "pull_request:44101"}, {"accept": false, "left": "pull_request:44011", "reason": "Swin and Swinv2 are closely related, but these are still distinct model implementations and PRs; separate refactors, not one duplicate change.", "right": "pull_request:44012"}, {"accept": false, "left": "pull_request:44011", "reason": "Same reasoning as above: Swin and Swinv2 are parallel migrations in different code paths, not one underlying bug or patch.", "right": "pull_request:44092"}, {"accept": false, "left": "pull_request:44012", "reason": "Swinv2 and Swin are separate model files and refactors; similarity of intent is not enough to merge them.", "right": "pull_request:44091"}, {"accept": false, "left": "pull_request:44091", "reason": "These are model-specific output-tracing refactors for two different backbones, so they are not duplicates.", "right": "pull_request:44092"}, {"accept": false, "left": "pull_request:44017", "reason": "SegFormer and RoFormer touch unrelated model internals; same refactor pattern, different change.", "right": "pull_request:44335"}, {"accept": false, "left": "pull_request:44047", "reason": "Bloom and GPT-J are distinct model implementations; the shared decorator migration is not enough to treat them as the same PR.", "right": "pull_request:44084"}, {"accept": false, "left": 
"pull_request:44056", "reason": "MPNet and XLM/Flaubert are unrelated model paths; these are separate output-tracing migrations.", "right": "pull_request:44101"}, {"accept": false, "left": "pull_request:44073", "reason": "VisualBert and LongT5 are different architectures and code paths; they share a theme but not the same fix.", "right": "pull_request:44161"}], "summary": "These are not duplicates of one another; they are separate model-specific refactors migrating different code paths to standardized output tracing. They share a common pattern and sometimes the same tracking issue, but each PR touches a distinct model implementation and would not plausibly collapse into one change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44011|pull_request:44086", "pull_request:44017|pull_request:44722", "pull_request:44047|pull_request:44071", "pull_request:44020|pull_request:44087", "pull_request:43997|pull_request:44073", "pull_request:44073|pull_request:44101", "pull_request:44011|pull_request:44012", "pull_request:44011|pull_request:44092", "pull_request:44012|pull_request:44091", "pull_request:44091|pull_request:44092", "pull_request:44017|pull_request:44335", "pull_request:44047|pull_request:44084", "pull_request:44056|pull_request:44101", "pull_request:44073|pull_request:44161"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 19842, "estimated_input_tokens": 9793, "item_count": 18, "node_count": 18, "serialized_chars": 39170, "soft_pair_count": 15}, "cached_at": "2026-04-14T13:59:08Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": 
"441a75a6f5f747c6ee7e053fd70bc213f49b3b6309d1b7a3ef43859fd10b405e", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43997", "pull_request:44007", "pull_request:44012", "pull_request:44017", "pull_request:44022", "pull_request:44046", "pull_request:44049", "pull_request:44056", "pull_request:44059", "pull_request:44065", "pull_request:44071", "pull_request:44076", "pull_request:44092", "pull_request:44101", "pull_request:44161", "pull_request:44333", "pull_request:44335", "pull_request:45365"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #44007 is the strongest representative of the cluster because it links to the tracking issue and covers multiple related model files, making it the best single anchor for this refactor family.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #44007 is the best canonical PR: it is explicitly tied to issue #43979, is broader than the single-model refactors, and appears to be the most representative umbrella for the standardized output-tracing migration.", "confidence": 0.91, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44059", "reason": "Same refactor theme, but GPT-2 and CTRL are different model implementations with different forward paths; not the same underlying bug/change.", "right": "pull_request:44065"}, {"accept": false, "left": "pull_request:44017", "reason": "Both are output-tracing refactors, but SegFormer and MPT are unrelated code paths and the diffs are model-specific, not one mergeable change.", "right": "pull_request:44071"}, {"accept": false, "left": "pull_request:44059", "reason": "GPT-2 vs RoFormer are distinct model changes; similar decorator migration does not make them duplicates.", "right": "pull_request:44335"}, {"accept": false, "left": "pull_request:44101", "reason": "XLM/Flaubert and LongT5 are different model families with separate implementations; this is a shared refactor pattern, not a duplicate PR.", "right": 
"pull_request:44161"}, {"accept": false, "left": "pull_request:44012", "reason": "Swinv2 vs ConvBERT are unrelated model files and code paths; same infrastructure pattern, different changes.", "right": "pull_request:44022"}, {"accept": false, "left": "pull_request:44022", "reason": "ConvBERT and Swinv2 are different model implementations; no evidence they fix the same concrete change.", "right": "pull_request:44092"}, {"accept": false, "left": "pull_request:44012", "reason": "Swinv2 and CodeGen are distinct model-specific refactors, not one underlying fix.", "right": "pull_request:44046"}, {"accept": false, "left": "pull_request:44046", "reason": "CodeGen and Swinv2 touch different model paths; only the general migration style overlaps.", "right": "pull_request:44092"}, {"accept": false, "left": "pull_request:43997", "reason": "RegNet/ResNet/RT-DETR refactor bundle versus MPNet refactor; related theme but not the same concrete change.", "right": "pull_request:44056"}, {"accept": false, "left": "pull_request:44065", "reason": "CTRL and XLM/Flaubert are separate model refactors with different affected code.", "right": "pull_request:44101"}, {"accept": false, "left": "pull_request:44049", "reason": "FNet and ALBERT changes are different model-specific output-handling updates, not duplicates.", "right": "pull_request:44333"}, {"accept": false, "left": "pull_request:44071", "reason": "MPT and ImageGPT are separate model migrations; same tooling, different implementation work.", "right": "pull_request:44076"}, {"accept": false, "left": "pull_request:44056", "reason": "MPNet and LongT5 are different model families; the overlap is only the output-tracing refactor pattern.", "right": "pull_request:44161"}, {"accept": false, "left": "pull_request:43997", "reason": "RegNet/ResNet/RT-DETR versus LongT5 are not the same code change; they share only the broader migration goal.", "right": "pull_request:44161"}, {"accept": false, "left": "pull_request:44007", "reason": "This is 
the broad multi-model refactor versus a GPT-J-specific refactor; related theme, but not the same underlying PR.", "right": "pull_request:45365"}, {"accept": true, "left": "pull_request:44012", "reason": "These appear to be the same Swinv2 output-tracing refactor: same model file, same title, and nearly identical diff preview, so they look duplicate.", "right": "pull_request:44092"}], "summary": "This cluster is mostly a set of model-specific refactors to migrate output tracing to the new capture_outputs/can_return_tuple pattern. They share a theme, but most are not duplicates because they touch different model implementations and code paths. The only strong duplicate pair is the two Swinv2 PRs, which appear to be the same underlying change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44059|pull_request:44065", "pull_request:44017|pull_request:44071", "pull_request:44059|pull_request:44335", "pull_request:44101|pull_request:44161", "pull_request:44012|pull_request:44022", "pull_request:44022|pull_request:44092", "pull_request:44012|pull_request:44046", "pull_request:44046|pull_request:44092", "pull_request:43997|pull_request:44056", "pull_request:44065|pull_request:44101", "pull_request:44049|pull_request:44333", "pull_request:44071|pull_request:44076", "pull_request:44056|pull_request:44161", "pull_request:43997|pull_request:44161", "pull_request:44007|pull_request:45365"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 20308, "estimated_input_tokens": 10026, "item_count": 18, "node_count": 18, "serialized_chars": 40101, "soft_pair_count": 17}, "cached_at": "2026-04-14T13:59:55Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": 
"gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "6dfcd327aa3a5a9d3ac340ac9d470d12dde6bb4cc7c5fb053e5cfdea0a917be4", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43996", "pull_request:43997", "pull_request:44010", "pull_request:44011", "pull_request:44018", "pull_request:44044", "pull_request:44047", "pull_request:44049", "pull_request:44071", "pull_request:44073", "pull_request:44074", "pull_request:44089", "pull_request:44091", "pull_request:44101", "pull_request:44129", "pull_request:44147", "pull_request:44161", "pull_request:44333"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #44161 is the best overall anchor for this cluster because it directly targets the tracking issue and captures the common output-tracing migration pattern without being just a duplicate of another PR.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #44161 is the strongest representative of the cluster theme: it explicitly fixes #43979 and implements the standardized output-tracing refactor in a broader, more self-contained way than the narrower model-specific follow-ups.", "confidence": 0.91, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44074", "reason": "Both are output-tracing refactors, but they are for different models (TextNet vs LongT5) and do not look like the same concrete change.", "right": "pull_request:44161"}, {"accept": false, "left": "pull_request:44147", "reason": "CTRL and LongT5 are separate code paths; this is the same refactor style, not the same underlying bug or patch.", "right": "pull_request:44161"}, {"accept": false, "left": "pull_request:43996", "reason": "Both touch FNet output tracing, but #43996 is a broader FNet+CVT refactor and #44049 is a separate FNet-only PR; not enough evidence they are the same change.", "right": "pull_request:44049"}, {"accept": false, "left": "pull_request:44047", "reason": "BLOOM and ALBERT are unrelated model implementations; 
they share a refactor motif but not the same code-path change.", "right": "pull_request:44333"}, {"accept": false, "left": "pull_request:44010", "reason": "SqueezeBERT and MPT are different models with distinct forward paths; these are parallel refactors, not duplicates.", "right": "pull_request:44071"}, {"accept": false, "left": "pull_request:44010", "reason": "Different models and different modules; same general output-tracing theme, but not the same underlying change.", "right": "pull_request:44073"}, {"accept": false, "left": "pull_request:43997", "reason": "RegNet and TextNet are separate model implementations; no sign of a shared concrete fix.", "right": "pull_request:44074"}, {"accept": false, "left": "pull_request:44018", "reason": "GPT-Neo and SpeechT5 are unrelated code paths; these are not the same bug/change.", "right": "pull_request:44129"}, {"accept": false, "left": "pull_request:44071", "reason": "MPT and SqueezeBERT are different model files and refactors; too broad to treat as duplicates.", "right": "pull_request:44089"}, {"accept": false, "left": "pull_request:43997", "reason": "RegNet and SpeechT5 are unrelated implementations; similar decorator migration, but not the same patch.", "right": "pull_request:44129"}, {"accept": false, "left": "pull_request:44010", "reason": "SqueezeBERT and GPT-Neo are distinct model paths; no concrete shared change beyond the refactor pattern.", "right": "pull_request:44018"}, {"accept": false, "left": "pull_request:44074", "reason": "TextNet and XLM/Flaubert are different model implementations; same architectural theme, but not the same underlying change.", "right": "pull_request:44101"}, {"accept": false, "left": "pull_request:43996", "reason": "FNet/CVT refactor vs LongT5 refactor; related initiative, but different model code and not the same patch.", "right": "pull_request:44161"}, {"accept": false, "left": "pull_request:44010", "reason": "SqueezeBERT and DeBERTa-v2 are unrelated model code paths; not a 
duplicate change.", "right": "pull_request:44044"}, {"accept": false, "left": "pull_request:44044", "reason": "DeBERTa-v2 and TextNet are separate model implementations; they only share the refactor pattern.", "right": "pull_request:44074"}, {"accept": false, "left": "pull_request:44044", "reason": "DeBERTa-v2 and CTRL are different models and different concrete changes.", "right": "pull_request:44147"}, {"accept": true, "left": "pull_request:44011", "reason": "These appear to be the same Swin output-tracing refactor: identical title, same files, same issue target, and matching diff shape.", "right": "pull_request:44091"}], "summary": "This cluster is mostly a set of model-specific output-tracing refactors that share a common architectural pattern, but only one soft edge is a true duplicate pair: Swin PR #44011 and #44091 are the same change. Most other pairs are related refactors in different models, not duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44074|pull_request:44161", "pull_request:44147|pull_request:44161", "pull_request:43996|pull_request:44049", "pull_request:44047|pull_request:44333", "pull_request:44010|pull_request:44071", "pull_request:44010|pull_request:44073", "pull_request:43997|pull_request:44074", "pull_request:44018|pull_request:44129", "pull_request:44071|pull_request:44089", "pull_request:43997|pull_request:44129", "pull_request:44010|pull_request:44018", "pull_request:44074|pull_request:44101", "pull_request:43996|pull_request:44161", "pull_request:44010|pull_request:44044", "pull_request:44044|pull_request:44074", "pull_request:44044|pull_request:44147", "pull_request:44011|pull_request:44091"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 20418, "estimated_input_tokens": 10081, "item_count": 17, "node_count": 17, "serialized_chars": 40324, "soft_pair_count": 20}, 
"cached_at": "2026-04-14T14:01:09Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "21df0b8468780efecc40ae97c72c08315674012f9e67bfd6db8525b22524927b", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43996", "pull_request:44007", "pull_request:44011", "pull_request:44020", "pull_request:44022", "pull_request:44023", "pull_request:44044", "pull_request:44066", "pull_request:44084", "pull_request:44085", "pull_request:44089", "pull_request:44093", "pull_request:44104", "pull_request:44129", "pull_request:44140", "pull_request:44154", "pull_request:44722"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #44093 is the best overall representative for the duplicate subset because it is narrowly scoped, clearly about the same DeBERTa code path, and has an exact matching counterpart.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #44093 is a clean, focused DeBERTa output-tracing refactor and an exact duplicate of #44020; among the true duplicates, it is a strong representative.", "confidence": 0.89, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:44020", "reason": "Same DeBERTa file, same issue target, and same output-tracing refactor; looks like a direct duplicate.", "right": "pull_request:44093"}, {"accept": true, "left": "pull_request:44066", "reason": "Both touch GPT-J/CodeGen output tracing with the same concrete code-path changes; likely the same refactor in a later form.", "right": "pull_request:44722"}, {"accept": true, "left": "pull_request:44066", "reason": "Same GPT-J output-tracing refactor in the same file; 44066 just batches it with CodeGen.", "right": "pull_request:44084"}, {"accept": false, "left": "pull_request:44022", "reason": "Different 
models and different implementations; they share only the broad umbrella issue, not the same code path.", "right": "pull_request:44023"}, {"accept": false, "left": "pull_request:44011", "reason": "Swin vs ConvBERT are separate model refactors, so this is not the same underlying change.", "right": "pull_request:44022"}, {"accept": true, "left": "pull_request:44084", "reason": "Same GPT-J/CodeGen output-tracing family with the same changed files; could plausibly be merged as one PR.", "right": "pull_request:44722"}, {"accept": false, "left": "pull_request:44020", "reason": "DeBERTa and Nystromformer are unrelated model-specific refactors despite the shared umbrella issue.", "right": "pull_request:44023"}, {"accept": false, "left": "pull_request:44023", "reason": "Different model code paths; no indication these are the same bug or change.", "right": "pull_request:44093"}, {"accept": false, "left": "pull_request:44020", "reason": "Different model implementations; only the general output-tracing theme overlaps.", "right": "pull_request:44022"}, {"accept": false, "left": "pull_request:44022", "reason": "ConvBERT and DeBERTa are separate model changes, not a single duplicate PR.", "right": "pull_request:44093"}, {"accept": true, "left": "pull_request:44084", "reason": "The diff preview and filenames line up on the same GPT-J implementation change, so this looks like a duplicate/alternate PR.", "right": "pull_request:44085"}, {"accept": false, "left": "pull_request:44089", "reason": "Different models and different files; they are related only by the standardized tracing effort.", "right": "pull_request:44129"}, {"accept": false, "left": "pull_request:44020", "reason": "Different model families and code paths; not the same underlying fix.", "right": "pull_request:44089"}, {"accept": false, "left": "pull_request:43996", "reason": "Separate model refactors (CVT/FNet vs RegNet/ResNet/RT-DETR-ResNet), so not duplicates.", "right": "pull_request:44007"}, {"accept": false, 
"left": "pull_request:43996", "reason": "Different model code paths; same umbrella issue is not enough to merge them as duplicates.", "right": "pull_request:44129"}, {"accept": false, "left": "pull_request:44044", "reason": "DeBERTa-v2 and SpeechT5 are unrelated implementations, so these are not the same change.", "right": "pull_request:44129"}, {"accept": false, "left": "pull_request:44044", "reason": "Different model families and distinct output-capture refactors; not a duplicate pair.", "right": "pull_request:44154"}, {"accept": false, "left": "pull_request:44023", "reason": "Nystromformer vs GPT-J/CodeGen are unrelated code paths despite similar refactoring style.", "right": "pull_request:44085"}, {"accept": false, "left": "pull_request:44093", "reason": "Different models and filenames; they only share the umbrella output-tracing theme.", "right": "pull_request:44722"}, {"accept": true, "left": "pull_request:44104", "reason": "Same Megatron-BERT file with nearly identical diff preview; this looks like the same PR content in duplicate form.", "right": "pull_request:44140"}], "summary": "This is mostly a family of model-specific output-tracing refactors tied to umbrella issue #43979, not one single duplicate PR. 
I only accepted pairs that appear to be the same concrete model implementation change; cross-model refactors were rejected."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44020|pull_request:44093", "pull_request:44066|pull_request:44722", "pull_request:44066|pull_request:44084", "pull_request:44022|pull_request:44023", "pull_request:44011|pull_request:44022", "pull_request:44084|pull_request:44722", "pull_request:44020|pull_request:44023", "pull_request:44023|pull_request:44093", "pull_request:44020|pull_request:44022", "pull_request:44022|pull_request:44093", "pull_request:44084|pull_request:44085", "pull_request:44089|pull_request:44129", "pull_request:44020|pull_request:44089", "pull_request:43996|pull_request:44007", "pull_request:43996|pull_request:44129", "pull_request:44044|pull_request:44129", "pull_request:44044|pull_request:44154", "pull_request:44023|pull_request:44085", "pull_request:44093|pull_request:44722", "pull_request:44104|pull_request:44140"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 20594, "estimated_input_tokens": 10169, "item_count": 18, "node_count": 18, "serialized_chars": 40676, "soft_pair_count": 20}, "cached_at": "2026-04-14T14:02:46Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "3b5b2c6de24cd3ef82bc12357373460b8cdb0e46965bc9107b07a3aeb0d53265", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44012", "pull_request:44024", "pull_request:44046", "pull_request:44049", "pull_request:44071", "pull_request:44073", "pull_request:44074", "pull_request:44076", "pull_request:44092", "pull_request:44094", "pull_request:44102", 
"pull_request:44104", "pull_request:44105", "pull_request:44106", "pull_request:44138", "pull_request:44139", "pull_request:44140", "pull_request:44141"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "44092 is the best representative among the true duplicates because it is a clean SwinV2 refactor with test coverage and matches 44012 closely; it still only represents one duplicate pair, not the full cluster.", "canonical_issue_reason": null, "canonical_pr_reason": "No single canonical PR for the whole cluster: these are per-model refactors, not one shared change. The only true duplicate sets are 44012\u219444092, 44046\u219444094, 44102\u219444141, and 44105\u219444139.", "confidence": 0.96, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:44102", "reason": "Same IBERT file, same refactor, effectively identical change set and title.", "right": "pull_request:44141"}, {"accept": true, "left": "pull_request:44105", "reason": "Same LiLT file and same output-tracing refactor; looks like a duplicate PR.", "right": "pull_request:44139"}, {"accept": false, "left": "pull_request:44106", "reason": "Different underlying changes: YOSO on one side, but the other PR also adds MRA changes, so this is not the same PR.", "right": "pull_request:44138"}, {"accept": true, "left": "pull_request:44046", "reason": "Same CodeGen refactor on the same file with matching diff scope; clear duplicate.", "right": "pull_request:44094"}, {"accept": true, "left": "pull_request:44012", "reason": "Same SwinV2 refactor on the same file/test pair; appears to be the same change.", "right": "pull_request:44092"}, {"accept": false, "left": "pull_request:44024", "reason": "Both are output-tracing refactors, but for different models (FocalNet vs ImageGPT), so not the same change.", "right": "pull_request:44076"}, {"accept": false, "left": "pull_request:44139", "reason": "Different models and code paths: LiLT versus Megatron-BERT.", "right": "pull_request:44140"}, 
{"accept": false, "left": "pull_request:44138", "reason": "Different models and changes: YOSO/MRA versus Megatron-BERT; too broad to treat as one PR.", "right": "pull_request:44140"}, {"accept": false, "left": "pull_request:44105", "reason": "LiLT refactor and Megatron-BERT refactor are separate model-specific changes.", "right": "pull_request:44140"}, {"accept": false, "left": "pull_request:44104", "reason": "Megatron-BERT and LiLT are distinct model implementations; same theme but not the same bug/change.", "right": "pull_request:44105"}, {"accept": false, "left": "pull_request:44104", "reason": "Different model code paths (Megatron-BERT vs LiLT).", "right": "pull_request:44139"}, {"accept": false, "left": "pull_request:44106", "reason": "YOSO and Megatron-BERT are unrelated model-specific refactors.", "right": "pull_request:44140"}, {"accept": false, "left": "pull_request:44104", "reason": "Different models and refactor scopes; not mergeable as one concrete change.", "right": "pull_request:44106"}, {"accept": false, "left": "pull_request:44104", "reason": "Megatron-BERT versus YOSO/MRA; these are separate PRs for different code paths.", "right": "pull_request:44138"}, {"accept": false, "left": "pull_request:44138", "reason": "Different models (YOSO/MRA vs LiLT), so not the same underlying change.", "right": "pull_request:44139"}, {"accept": false, "left": "pull_request:44106", "reason": "YOSO and LiLT are separate model refactors, not duplicates.", "right": "pull_request:44139"}, {"accept": false, "left": "pull_request:44105", "reason": "LiLT versus YOSO; same general refactor pattern, but different concrete model changes.", "right": "pull_request:44106"}, {"accept": false, "left": "pull_request:44105", "reason": "LiLT is unrelated to the YOSO/MRA change set.", "right": "pull_request:44138"}, {"accept": false, "left": "pull_request:44049", "reason": "Different model implementations (FNet vs MPT) and different code paths.", "right": "pull_request:44071"}, 
{"accept": false, "left": "pull_request:44073", "reason": "VisualBERT and TextNet are separate model refactors; same tracing theme is not enough to merge them.", "right": "pull_request:44074"}], "summary": "Mostly a heterogeneous set of model-specific output-tracing refactors. Only the exact same-model pairs look like true duplicates; the rest are separate PRs for different architectures and should stay distinct."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44102|pull_request:44141", "pull_request:44105|pull_request:44139", "pull_request:44106|pull_request:44138", "pull_request:44046|pull_request:44094", "pull_request:44012|pull_request:44092", "pull_request:44024|pull_request:44076", "pull_request:44139|pull_request:44140", "pull_request:44138|pull_request:44140", "pull_request:44105|pull_request:44140", "pull_request:44104|pull_request:44105", "pull_request:44104|pull_request:44139", "pull_request:44106|pull_request:44140", "pull_request:44104|pull_request:44106", "pull_request:44104|pull_request:44138", "pull_request:44138|pull_request:44139", "pull_request:44106|pull_request:44139", "pull_request:44105|pull_request:44106", "pull_request:44105|pull_request:44138", "pull_request:44049|pull_request:44071", "pull_request:44073|pull_request:44074"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 17334, "estimated_input_tokens": 8539, "item_count": 14, "node_count": 14, "serialized_chars": 34154, "soft_pair_count": 21}, "cached_at": "2026-04-14T14:04:56Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "7313e645ad9251cce44617b8db8b92fe138fe65656d554b9cbb6f2f9bcc7cb35", 
"prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43997", "pull_request:44010", "pull_request:44046", "pull_request:44047", "pull_request:44056", "pull_request:44073", "pull_request:44074", "pull_request:44094", "pull_request:44129", "pull_request:44147", "pull_request:44148", "pull_request:44149", "pull_request:44150", "pull_request:44151"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "44129 is the best global representative of the set because it has the clearest source issue and a focused single-model change, while most others are broader or cross-model refactors without a concrete duplicate signal.", "canonical_issue_reason": null, "canonical_pr_reason": "No issue artifacts are present; 44129 is the strongest representative PR because it is self-contained, explicitly linked to issue #43979, and shows a focused standardized output-capture refactor.", "confidence": 0.78, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44046", "reason": "Same refactor theme, but CodeGen and Bloom are different model code paths.", "right": "pull_request:44047"}, {"accept": false, "left": "pull_request:44047", "reason": "Different model implementations; the shared output-tracing theme is not enough to treat them as the same change.", "right": "pull_request:44094"}, {"accept": false, "left": "pull_request:44056", "reason": "MPNet vs VisualBert are separate model-specific refactors, not one concrete duplicate fix.", "right": "pull_request:44073"}, {"accept": false, "left": "pull_request:44056", "reason": "Same decorator-migration pattern, but MPNet and TextNet are distinct code paths.", "right": "pull_request:44074"}, {"accept": false, "left": "pull_request:44147", "reason": "44148 bundles CTRL with other models; it is not the same concrete change as the CTRL-only PR.", "right": "pull_request:44148"}, {"accept": false, "left": "pull_request:44147", "reason": "CTRL-only refactor vs a broader multi-model bundle; not the 
same underlying PR.", "right": "pull_request:44149"}, {"accept": false, "left": "pull_request:44147", "reason": "CTRL and MPT are different model implementations, so this is only a theme match.", "right": "pull_request:44150"}, {"accept": false, "left": "pull_request:44147", "reason": "Different model sets and different concrete patches; too broad to merge as duplicates.", "right": "pull_request:44151"}, {"accept": false, "left": "pull_request:44148", "reason": "Overlapping bundle style, but the model sets and concrete edits differ.", "right": "pull_request:44149"}, {"accept": false, "left": "pull_request:44148", "reason": "A multi-model bundle vs an MPT-only refactor; not the same change.", "right": "pull_request:44150"}, {"accept": false, "left": "pull_request:44148", "reason": "Both are bundles, but they cover different model combinations and cannot be treated as one fix.", "right": "pull_request:44151"}, {"accept": false, "left": "pull_request:44149", "reason": "BioGPT/CPMAnt/Bros/MPT bundle vs MPT-only; not the same underlying PR.", "right": "pull_request:44150"}, {"accept": false, "left": "pull_request:44149", "reason": "Different bundle contents and different model coverage; not a duplicate.", "right": "pull_request:44151"}, {"accept": false, "left": "pull_request:44150", "reason": "MPT-only refactor vs BioGPT+MPT bundle; too much extra scope to merge as duplicates.", "right": "pull_request:44151"}, {"accept": false, "left": "pull_request:43997", "reason": "RegNet and SqueezeBert are unrelated model-specific refactors.", "right": "pull_request:44010"}, {"accept": false, "left": "pull_request:43997", "reason": "Different model families and different code paths; only the general output-tracing theme matches.", "right": "pull_request:44147"}, {"accept": false, "left": "pull_request:43997", "reason": "RegNet vs a multi-model bundle; not the same concrete bug or change.", "right": "pull_request:44148"}, {"accept": false, "left": "pull_request:43997", "reason": 
"RegNet is unrelated to the BioGPT/CPMAnt/Bros/MPT bundle.", "right": "pull_request:44149"}, {"accept": false, "left": "pull_request:44010", "reason": "SqueezeBert and SpeechT5 are unrelated implementations, despite both using the tracing refactor pattern.", "right": "pull_request:44129"}, {"accept": false, "left": "pull_request:43997", "reason": "RegNet and MPT are distinct model code paths.", "right": "pull_request:44150"}, {"accept": false, "left": "pull_request:43997", "reason": "RegNet is unrelated to the BioGPT+MPT refactor bundle.", "right": "pull_request:44151"}], "summary": "These are mostly model-specific refactors to standardized output tracing/capture decorators. The provided soft edges mostly connect different model implementations or multi-model bundles, so they are not safe duplicate merges under a strict PR triage standard."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44046|pull_request:44047", "pull_request:44047|pull_request:44094", "pull_request:44056|pull_request:44073", "pull_request:44056|pull_request:44074", "pull_request:44147|pull_request:44148", "pull_request:44147|pull_request:44149", "pull_request:44147|pull_request:44150", "pull_request:44147|pull_request:44151", "pull_request:44148|pull_request:44149", "pull_request:44148|pull_request:44150", "pull_request:44148|pull_request:44151", "pull_request:44149|pull_request:44150", "pull_request:44149|pull_request:44151", "pull_request:44150|pull_request:44151", "pull_request:43997|pull_request:44010", "pull_request:43997|pull_request:44147", "pull_request:43997|pull_request:44148", "pull_request:43997|pull_request:44149", "pull_request:44010|pull_request:44129", "pull_request:43997|pull_request:44150", "pull_request:43997|pull_request:44151"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2434, "estimated_input_tokens": 1089, "item_count": 
2, "node_count": 2, "serialized_chars": 4355, "soft_pair_count": 1}, "cached_at": "2026-04-14T14:05:41Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "518bd23bf4ed8172aa0cc103fa376ba018e5a7fc93875e980dcacfff2c883ca2", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43607", "pull_request:43654"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #43654 is the stronger representative of its own bug because it includes the behavioral fix plus a test update and explicit issue linkage; PR #43607 is a separate, unrelated fix.", "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.98, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43607", "reason": "Different code paths and bug classes: Bark model device mismatch vs tokenizer special-token overwrite behavior. No shared concrete fix and they would not plausibly merge into one PR.", "right": "pull_request:43654"}], "summary": "These are unrelated pull requests: one fixes Bark CPU offload/device placement, the other fixes tokenizer special-token handling in batch add_tokens. 
They should not be clustered as duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43607|pull_request:43654"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 3298, "estimated_input_tokens": 1521, "item_count": 2, "node_count": 2, "serialized_chars": 6082, "soft_pair_count": 1}, "cached_at": "2026-04-14T14:05:59Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "21a01b9d5104642c2a298abcc935b0fa7238e9b0afd20962c823e67beb92119b", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43612", "pull_request:43613"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 43613 is the better representative because it is more comprehensive and spans more concrete model paths and coverage than 43612.", "canonical_issue_reason": null, "canonical_pr_reason": "If one PR must stand in for the cluster, 43613 is the broader and more central change: it adds the visual segmentation pipeline across multiple model integrations and tests, making it the stronger representative.", "confidence": 0.94, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43612", "reason": "Do not merge: these target different underlying pipelines ('concept' vs 'visual' segmentation) with different model integrations and docs, so they are not the same code change.", "right": "pull_request:43613"}], "summary": "These are related but not duplicates: both add new promptable segmentation pipelines, but one is for concept segmentation and the other for visual segmentation with different model coverage and task docs."}, "error_kind": null, "error_message": null, 
"evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43612|pull_request:43613"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2006, "estimated_input_tokens": 875, "item_count": 2, "node_count": 2, "serialized_chars": 3497, "soft_pair_count": 1}, "cached_at": "2026-04-14T14:06:54Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "bb2c4b55d0e5391ebff989adacbbbd235d2511623078efa883c8bea30a0fe585", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43642", "pull_request:45100"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #45100 is the most suitable global representative among the PRs, but only as a documentation-oriented placeholder; it is still unrelated to #43642.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #45100 is the better representative of this cluster only because it has clearer documentation intent and more discussion activity; PR #43642 is a tiny comment-only cleanup in an unrelated benchmark config file.", "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43642", "reason": "Different targets and changes: one edits benchmark config comments, the other rewrites accelerator-selection docs. 
They do not fix the same underlying bug or implement the same change.", "right": "pull_request:45100"}], "summary": "The two PRs are both minor documentation edits but address completely different files and topics, so they are not duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43642|pull_request:45100"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2404, "estimated_input_tokens": 1074, "item_count": 2, "node_count": 2, "serialized_chars": 4293, "soft_pair_count": 1}, "cached_at": "2026-04-14T14:07:13Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "977a7a3a79eed9cc251ae2df6ebe1a18747e8be975dedee855d3cd8f594b2b9d", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43651", "pull_request:43663"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #43651 is the better representative of the cluster because it fixes a concrete training bug with a narrowly scoped code-path change, whereas #43663 is primarily a refactor to expose signature-column logic as an override point.", "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43651", "reason": "Different underlying problems and code paths: one changes loss normalization during training_step, the other changes how trainer input columns are derived for filtering. 
They are not the same fix and would not plausibly be merged as one PR.", "right": "pull_request:43663"}], "summary": "These two PRs both touch `trainer.py` and add overridable hooks, but they address different behaviors: gradient-accumulation loss scaling vs. dataset column filtering. They are related in theme, not duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43651|pull_request:43663"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 3252, "estimated_input_tokens": 1498, "item_count": 3, "node_count": 3, "serialized_chars": 5992, "soft_pair_count": 2}, "cached_at": "2026-04-14T14:09:11Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "30e76d485f323b3f4189762cd0c43cdb0e25f771efb563edd01e18b0fcb5421b", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43656", "pull_request:43836", "pull_request:43842"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 43842 is the best representative because it is the most focused fix for the concrete `TypeAdapter`/pydantic import problem, making it the clearest candidate for a canonical PR in this cluster.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 43842 is the cleanest representative because it directly addresses the reported `TypeAdapter` NameError with the narrowest change in the exact failing Serve CLI code path.", "confidence": 0.78, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43656", "reason": "They touch the same file and similar annotation code, but 43656 targets a different issue (`43576` vs `43824`) and the overlap is too superficial to 
treat them as the same fix or clearly mergeable.", "right": "pull_request:43842"}, {"accept": true, "left": "pull_request:43836", "reason": "Both address the same `TypeAdapter` NameError in `serve.py` and share issue target `43824`, so they plausibly overlap on the same concrete code path, even though 43836 is broader.", "right": "pull_request:43842"}], "summary": "The cluster is centered on TypeAdapter/annotation handling in `src/transformers/cli/serve.py`, but the evidence is uneven: 43836 and 43842 plausibly overlap on the same NameError path, while 43656 is only a weaker similarity match and points to a different issue target, so the three should not all be treated as the same fix."}, "error_kind": null, "error_message": null, "evaluator_result": {"accept": false, "feedback": "The packet does not support the claim that all three PRs are the same bugfix. In particular, 43656 only has soft file/similarity overlap with 43842 and points to a different issue target (43576 vs 43824), so treating that pair as clearly mergeable is too strong. The 43836/43842 pair is more plausible because they share issue target 43824 and the same file, but the summary should still be conservative about them being overlapping fixes rather than near-identical. 
Overall the analyst overstates the evidence."}, "evaluator_used": true, "retried": true}, "soft_pairs": ["pull_request:43656|pull_request:43842", "pull_request:43836|pull_request:43842"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2550, "estimated_input_tokens": 1147, "item_count": 2, "node_count": 2, "serialized_chars": 4587, "soft_pair_count": 1}, "cached_at": "2026-04-14T14:09:54Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "8eb505ea3da9bbdddeead42a4e58df303bb860d4e2cc2cd7575b80e31e21ff50", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43659", "pull_request:43840"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #43659 is the best overall PR candidate because it is already merged and directly addresses the concrete bug in the processor and modular model paths.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #43659 is the best canonical PR because it is the merged fix for the exact timestamp bug, with the same file-level changes and explicit issue linkage.", "confidence": 0.99, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:43659", "reason": "Same underlying bug and same fix: both replace `merge_size` with `temporal_patch_size` in the Qwen3-VL timestamp calculation code paths, so they are duplicate change attempts and could plausibly be merged into one PR.", "right": "pull_request:43840"}], "summary": "Both PRs fix the same Qwen3-VL timestamp-calculation bug by switching from `merge_size` to `temporal_patch_size` in the same code paths; one is the merged implementation and the other is a near-identical follow-up duplicate."}, "error_kind": null, "error_message": null, 
"evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43659|pull_request:43840"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2370, "estimated_input_tokens": 1057, "item_count": 2, "node_count": 2, "serialized_chars": 4225, "soft_pair_count": 1}, "cached_at": "2026-04-14T14:10:14Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "b6b2f200a2c6c5e07eb5aa770160497e1a8e38ffe376a50d112149c302af9f89", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43705", "pull_request:44777"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #43705 is the best representative of the cluster because it introduces the actual bidirectional-attention behavior; the docs PR depends on it and does not fix or change the runtime code path.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #43705 is the substantive implementation: it changes `masking_utils.py` to route `is_causal=False` to bidirectional masking and includes tests/related code updates. PR #44777 is docs-only.", "confidence": 0.95, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43705", "reason": "Related feature, but not the same concrete change: #43705 implements bidirectional attention in code, while #44777 only documents the `is_causal` feature. They should remain separate PRs.", "right": "pull_request:44777"}], "summary": "The two PRs are related by the same feature flag (`is_causal=False` for bidirectional attention), but they are not the same change: one implements the code path and tests, the other is documentation only. 
I would not deduplicate them as the same PR."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43705|pull_request:44777"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 3778, "estimated_input_tokens": 1761, "item_count": 3, "node_count": 3, "serialized_chars": 7042, "soft_pair_count": 2}, "cached_at": "2026-04-14T14:10:38Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "cf84d4aa9fdc4ce57e8bd2c321060dfd3be1be218b84786cddd5252553caf1df", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43714", "pull_request:43736", "pull_request:43738"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 43738 is the best representative of the cluster because it has the widest scope and most substantial impact among the three distinct refactors.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 43738 is the broadest and most central trainer-internal refactor in the set, with the largest diff and the clearest standalone module extraction pattern.", "confidence": 0.94, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43714", "reason": "Different features: 43714 extracts NEFTune hooks into a standalone integration module, while 43736 moves checkpoint sorting/rotation logic into trainer_utils. They touch some of the same trainer plumbing but not the same underlying bug or change.", "right": "pull_request:43736"}, {"accept": false, "left": "pull_request:43736", "reason": "Different refactors in the trainer: 43736 changes checkpoint management, whereas 43738 moves optimizer initialization logic into trainer_optimizer.py. 
Shared trainer/test files are broad subsystem overlap, not duplicate content.", "right": "pull_request:43738"}], "summary": "All three pull requests are related trainer refactors, but they address different code paths: NEFTune helpers, checkpoint sorting/rotation, and optimizer initialization. Shared files are incidental, not evidence of duplicate changes."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43714|pull_request:43736", "pull_request:43736|pull_request:43738"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2424, "estimated_input_tokens": 1084, "item_count": 2, "node_count": 2, "serialized_chars": 4336, "soft_pair_count": 1}, "cached_at": "2026-04-14T14:10:57Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "15f661a6154fd6c84765309d7b12fc339f3c69abefedadfcb94765d6378f7ce6", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43718", "pull_request:43770"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "#43770 is the best representative PR because it directly fixes the code path, includes validation, and is the completed merged patch. 
#43718 is essentially the same fix without the follow-up test coverage.", "canonical_issue_reason": null, "canonical_pr_reason": "#43770 is the later, merged implementation of the same fix and adds test coverage; it fully subsumes #43718\u2019s change to gate checkpoint batch-size restoration behind `auto_find_batch_size`.", "confidence": 0.97, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:43718", "reason": "Same concrete bug in `Trainer.train()` and same fix on the same line; #43770 is a superset with tests, so these are duplicate/near-duplicate PRs for triage purposes.", "right": "pull_request:43770"}], "summary": "Both PRs address the same Trainer resume bug: `train_batch_size` was being restored unconditionally from checkpoints, overriding a user\u2019s current batch-size configuration. PR #43770 is the stronger canonical PR because it includes the same fix plus tests and was merged; #43718 is an earlier, narrower draft of the same change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43718|pull_request:43770"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 8178, "estimated_input_tokens": 3961, "item_count": 6, "node_count": 6, "serialized_chars": 15843, "soft_pair_count": 9}, "cached_at": "2026-04-14T14:12:53Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "79dfd8e2b5f05bf671cf98c6148ff5ca59b1f05a08e50b3eafee9ce4341821c8", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43719", "pull_request:43739", "pull_request:43779", "pull_request:43788", "pull_request:43816", "pull_request:43848"], "result": {"analyst_result": 
{"best_issue_reason": null, "best_pr_reason": "43719 is the best representative of the cluster because it is merged, directly tied to the tracked issue, and contains the production fix rather than a later variant or test-only follow-up.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 43719 is the merged, earliest implementation that explicitly targets #43698 and makes the core code change in integration_utils.py.", "confidence": 0.81, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:43719", "reason": "Same SwanLab id/resume feature in the same integration file; these are code-level variants of the same fix.", "right": "pull_request:43816"}, {"accept": true, "left": "pull_request:43719", "reason": "Both implement the same SwanLab resume/id support in integration_utils.py and are plausibly mergeable into one PR.", "right": "pull_request:43788"}, {"accept": true, "left": "pull_request:43739", "reason": "Same underlying SwanLab callback/integration change, just phrased differently and with overlapping code edits.", "right": "pull_request:43816"}, {"accept": true, "left": "pull_request:43739", "reason": "Same concrete feature work for SwanLab resume/id support; both touch the same callback code-path.", "right": "pull_request:43788"}, {"accept": true, "left": "pull_request:43739", "reason": "Both are code changes for the same SwanLab resume/id behavior in the integration callback.", "right": "pull_request:43779"}, {"accept": true, "left": "pull_request:43788", "reason": "Both are variations of the same SwanLab integration fix and address the same runtime behavior.", "right": "pull_request:43816"}, {"accept": false, "left": "pull_request:43739", "reason": "43848 is test-only in a different file; it validates the behavior but is not the same production code change as the implementation PR.", "right": "pull_request:43848"}, {"accept": true, "left": "pull_request:43779", "reason": "Both aim at the same SwanLab resume/id support in the 
callback/integration and could plausibly be combined.", "right": "pull_request:43788"}, {"accept": true, "left": "pull_request:43719", "reason": "Same underlying SwanLab feature change; both are implementation variants for forwarding resume/id support.", "right": "pull_request:43779"}], "summary": "A tight cluster of SwanLab resume/id support PRs: one merged implementation plus several near-duplicate variants on the same integration file. I\u2019d keep the merged PR as canonical; the only clear outlier is the test-only follow-up."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43719|pull_request:43816", "pull_request:43719|pull_request:43788", "pull_request:43739|pull_request:43816", "pull_request:43739|pull_request:43788", "pull_request:43739|pull_request:43779", "pull_request:43788|pull_request:43816", "pull_request:43739|pull_request:43848", "pull_request:43779|pull_request:43788", "pull_request:43719|pull_request:43779"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2488, "estimated_input_tokens": 1116, "item_count": 2, "node_count": 2, "serialized_chars": 4462, "soft_pair_count": 1}, "cached_at": "2026-04-14T14:14:00Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "5ae2325e64dc4e1ed573a4b5fdf4701eb633b55329f77a1a6d35b3d29507e83f", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43745", "pull_request:44366"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 44366 is the best stand-alone PR to represent this bug because it is narrowly scoped to the exact key-mapping mismatch for models with _checkpoint_conversion_mapping, 
rather than the broader/generalized wording in PR 43745.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 44366 is the better canonical representative: its title and helper-based implementation directly target the _checkpoint_conversion_mapping mismatch during resume_from_checkpoint, and it includes focused test coverage.", "confidence": 0.95, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:43745", "reason": "Same underlying Trainer checkpoint-resume key renaming bug for conversion-mapped models; both modify the same code path and could plausibly be merged into one fix.", "right": "pull_request:44366"}], "summary": "Both pull requests address the same Trainer resume_from_checkpoint failure for checkpoint conversion-mapped models: saved checkpoint keys don\u2019t match the model\u2019s expected keys on restore. PR 44366 looks like a more targeted refinement of the same fix, while PR 43745 is the broader earlier version."}, "error_kind": null, "error_message": null, "evaluator_result": {"accept": true, "feedback": "The summary is grounded in the packet: both PRs target the same issue (43701), modify `trainer.py`, and their titles/diffs describe the same resume-from-checkpoint key-mapping mismatch for conversion-mapped models. 
The soft-edge verdict is conservative enough, and the pair looks plausibly mergeable as one bugfix rather than unrelated changes."}, "evaluator_used": true, "retried": false}, "soft_pairs": ["pull_request:43745|pull_request:44366"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2682, "estimated_input_tokens": 1213, "item_count": 2, "node_count": 2, "serialized_chars": 4850, "soft_pair_count": 1}, "cached_at": "2026-04-14T14:14:38Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "18a0e03c7e1be7e1fa5ac8ae285467c874db4d1b97b6f9f7ed23245cba73f16d", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43752", "pull_request:43888"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "#43888 is the best representative for the cluster because it is narrowly scoped to Param2MoE support and includes the actual source-path changes needed to land the feature.", "canonical_issue_reason": null, "canonical_pr_reason": "#43888 is the cleaner canonical PR: it explicitly adds the Param2MoE model/config/modeling code plus tests and auto-registration, with a title that matches the concrete feature being added.", "confidence": 0.79, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:43752", "reason": "They share the same Param2MoE model support files and appear to implement the same underlying feature addition. The code paths and filenames overlap directly, so they could plausibly be merged into one PR.", "right": "pull_request:43888"}], "summary": "Both PRs appear to be about introducing Param2MoE support in Transformers. 
PR #43888 is the clearer, more targeted implementation PR; PR #43752 looks like an older/release-style sibling with much broader unrelated example churn. I would treat them as the same underlying change and keep #43888 as the canonical representative."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43752|pull_request:43888"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 4146, "estimated_input_tokens": 1945, "item_count": 3, "node_count": 3, "serialized_chars": 7777, "soft_pair_count": 3}, "cached_at": "2026-04-14T14:15:18Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "a60b755d3c9b4ee45549721ba9b4ceed34b72a7452cf37ba6745f413ca39248e", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43757", "pull_request:45116", "pull_request:45118"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 45118 is the best representative because it adds full GGUF support, includes the relevant docs/tests/integration changes, and supersedes the earlier partial fix.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 45118 is the fullest implementation, explicitly marked as superseding 43757, and covers the actual GPT-OSS GGUF loading path rather than only a fallback.", "confidence": 0.96, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:45116", "reason": "Same underlying GPT-OSS GGUF loading feature/fix, same issue target, and largely overlapping files; 45118 is a clearer superset that would plausibly subsume 45116 into one PR.", "right": "pull_request:45118"}, {"accept": false, "left": "pull_request:43757", "reason": "43757 is 
only a stopgap fallback to gpt-neox for unsupported GGUF architectures and even touches an unrelated grounding_dino file; it is not the same concrete fix as full GPT-OSS GGUF loading.", "right": "pull_request:45116"}, {"accept": false, "left": "pull_request:43757", "reason": "43757 is a partial compatibility workaround, while 45118 implements full GGUF loading support; they do not look like the same underlying change and should not be merged as duplicates.", "right": "pull_request:45118"}], "summary": "The cluster is centered on GPT-OSS GGUF loading support. PR 43757 is a narrow fallback to avoid hard failure, while 45116/45118 implement the actual end-to-end GGUF loading support; 45118 is the most complete and explicitly supersedes 43757."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:45116|pull_request:45118", "pull_request:43757|pull_request:45116", "pull_request:43757|pull_request:45118"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2352, "estimated_input_tokens": 1048, "item_count": 2, "node_count": 2, "serialized_chars": 4192, "soft_pair_count": 1}, "cached_at": "2026-04-14T14:15:43Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "d8ce1f73bcf9663e27799e0b264ab21e0754997bca94e383656ec391c6811068", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43777", "pull_request:44099"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 44099 is the best representative of the cluster: same minimal scope, same files, and the more recent version bump.", "canonical_issue_reason": null, "canonical_pr_reason": "Prefer PR 44099 as the 
canonical representative because it is the later dev-version bump (5.3.0.dev0) and reflects the most recent state of this repetitive version-update pattern.", "confidence": 0.88, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43777", "reason": "Reject: both are dev-version bumps, but they update different target versions (5.2.0.dev0 vs 5.3.0.dev0) and are sequential release changes, not the same underlying fix/change.", "right": "pull_request:44099"}], "summary": "Two PRs that both only bump the dev version in the same files, but they are sequential release/version updates rather than the same change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43777|pull_request:44099"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2430, "estimated_input_tokens": 1087, "item_count": 2, "node_count": 2, "serialized_chars": 4347, "soft_pair_count": 1}, "cached_at": "2026-04-14T14:16:08Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "5e6d53765b2b4919423b88d856bec4c860c8bd3bc2af2d8b80e5daf664995b24", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43790", "pull_request:45014"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "Neither PR is a good global representative for a duplicate cluster because they target different subsystems and different underlying behaviors.", "canonical_issue_reason": null, "canonical_pr_reason": "No canonical PR: the PRs fix unrelated problems in different code paths and do not appear mergeable into one change.", "confidence": 0.98, "soft_edge_verdicts": [{"accept": false, "left": 
"pull_request:43790", "reason": "Reject: the changes are unrelated (CI torch version pinning vs test list generation logic) and do not share the same underlying bug or fix.", "right": "pull_request:45014"}], "summary": "These two PRs are not duplicates: one changes CircleCI Dockerfiles to unpin torch, while the other changes test-list generation to skip `tests_hub` when no tests are found."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43790|pull_request:45014"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 17146, "estimated_input_tokens": 8445, "item_count": 14, "node_count": 14, "serialized_chars": 33778, "soft_pair_count": 14}, "cached_at": "2026-04-14T14:17:10Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "59be1e78cd46cc284249e5e99cf228beb6ed31c297a36d67359ad49db9d506a4", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43798", "pull_request:43820", "pull_request:43988", "pull_request:44113", "pull_request:44266", "pull_request:44581", "pull_request:44611", "pull_request:44695", "pull_request:44765", "pull_request:44899", "pull_request:45004", "pull_request:45074", "pull_request:45085", "pull_request:45199"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 45074 is the strongest representative of the set for duplicate triage, but the overall cluster is mixed; it is the only item with a close, code-path-level counterpart (45085).", "canonical_issue_reason": null, "canonical_pr_reason": "PR 45074 is the best anchor for the cluster because it is part of the only plausible same-bug pair (with 45085) and targets a concrete 
dtype-mismatch fix in SwitchTransformers/TimmWrapper rather than a generic test-only cleanup.", "confidence": 0.77, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43798", "reason": "Different problems: test harness usage fixes vs a DAC STE fix in model code. No shared concrete bug or mergeable change.", "right": "pull_request:43820"}, {"accept": false, "left": "pull_request:43820", "reason": "Unrelated: DAC latent decoding/STE logic versus GraniteSpeech device-placement test behavior.", "right": "pull_request:44113"}, {"accept": false, "left": "pull_request:43820", "reason": "Different model paths and bugs: DAC from_latents vs Perceiver positional interpolation.", "right": "pull_request:44899"}, {"accept": false, "left": "pull_request:43820", "reason": "Completely different subsystems and failures: DAC latent decode logic vs SwitchTransformers/TimmWrapper dtype handling.", "right": "pull_request:45074"}, {"accept": false, "left": "pull_request:43988", "reason": "Test-failure fixes for different models and causes: LayoutXLM/LightOnOCR CI tweaks vs Llama4 vision init/MyT5 tokenizer signature.", "right": "pull_request:44581"}, {"accept": false, "left": "pull_request:43988", "reason": "Both touch tests/test_modeling_common.py, but the underlying fixes are unrelated test failures for different models (LayoutXLM/LightOnOCR vs Phi-3/other CI cleanup).", "right": "pull_request:45004"}, {"accept": false, "left": "pull_request:43988", "reason": "Different test failures and model families; no same underlying bug.", "right": "pull_request:44765"}, {"accept": false, "left": "pull_request:43988", "reason": "LayoutXLM/LightOnOCR test tweaks are unrelated to OmDet-Turbo timm-kwargs forwarding.", "right": "pull_request:44611"}, {"accept": false, "left": "pull_request:44266", "reason": "torch.export strict-mode check compatibility is unrelated to SwitchTransformers/TimmWrapper dtype mismatches.", "right": "pull_request:45074"}, {"accept": false, "left": 
"pull_request:44611", "reason": "OmDet-Turbo timm.create_model kwargs forwarding and Perceiver interpolation are different bugs in different models.", "right": "pull_request:44899"}, {"accept": false, "left": "pull_request:44695", "reason": "Both are test-failure cleanup PRs, but they fix different model/test failures and do not look like one underlying change.", "right": "pull_request:45004"}, {"accept": false, "left": "pull_request:44765", "reason": "Different CI/test failures in different model families; not the same bug.", "right": "pull_request:45004"}, {"accept": false, "left": "pull_request:45074", "reason": "SwitchTransformers/TimmWrapper bf16 dtype handling is unrelated to Wav2Vec2Phoneme tokenizer delimiter regression.", "right": "pull_request:45199"}, {"accept": true, "left": "pull_request:45074", "reason": "Same concrete bug family in the same code paths: both are SwitchTransformers/TimmWrapper bf16 dtype-mismatch fixes and could plausibly be merged into one PR.", "right": "pull_request:45085"}], "summary": "Most pairs are unrelated test/model fixes that only share broad subsystems or generic testing files. 
The only plausible duplicate edge is the two SwitchTransformers/TimmWrapper dtype-mismatch PRs, which appear to address the same concrete bf16 code path."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43798|pull_request:43820", "pull_request:43820|pull_request:44899", "pull_request:43820|pull_request:44113", "pull_request:43820|pull_request:45074", "pull_request:43988|pull_request:44581", "pull_request:43988|pull_request:45004", "pull_request:43988|pull_request:44765", "pull_request:43988|pull_request:44611", "pull_request:44266|pull_request:45074", "pull_request:44611|pull_request:44899", "pull_request:44695|pull_request:45004", "pull_request:44765|pull_request:45004", "pull_request:45074|pull_request:45199", "pull_request:45074|pull_request:45085"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2134, "estimated_input_tokens": 939, "item_count": 2, "node_count": 2, "serialized_chars": 3756, "soft_pair_count": 1}, "cached_at": "2026-04-14T14:17:42Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "fa89f43c0d5c8ce3a750770b6db9ce3471332b4b95ae571de83019e0486150a3", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43812", "pull_request:43822"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "43822 best represents the cluster because it fixes the concrete runtime/import problem in the affected code path, while 43812 is a narrower variant of the same typing-import adjustment.", "canonical_issue_reason": null, "canonical_pr_reason": "43822 is the stronger representative because it directly addresses the NameError path in accelerate.py, 
includes the typing/import cleanup in one coherent fix, and appears to be the more complete follow-up patch.", "confidence": 0.79, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:43812", "reason": "Accept: both PRs fix the same underlying accelerate.py import/type-hint bug in the same file and against the same tracked issue; 43822 is a broader implementation of the same change rather than a separate fix.", "right": "pull_request:43822"}], "summary": "Both pull requests target the same accelerate.py import/type-hint failure around torch-dependent names, with overlapping issue target and file; 43822 is the broader, more complete fix."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43812|pull_request:43822"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2434, "estimated_input_tokens": 1089, "item_count": 2, "node_count": 2, "serialized_chars": 4355, "soft_pair_count": 1}, "cached_at": "2026-04-14T14:18:30Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "59401b5276e84dcdd50891b447dc231f2a2ccbe17244dee6c8cd0a154fd09c06", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43826", "pull_request:43849"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 43849 is the strongest representative of the cluster because it most directly addresses the underlying bug and is the more complete fix.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 43849 is the better canonical PR: it states the validation bug clearly, ties to issue 43825, and includes tests in addition to the code fix.", "confidence": 0.93, 
"soft_edge_verdicts": [{"accept": true, "left": "pull_request:43826", "reason": "Same file, same `check_task` translation guard, and the diffs indicate the same bug: translation-specific validation/error handling when translation support is unavailable. These could plausibly be merged into one PR.", "right": "pull_request:43849"}], "summary": "Both pull requests target the same translation-task validation bug in `src/transformers/pipelines/base.py` and appear to be overlapping fixes for the same code path."}, "error_kind": null, "error_message": null, "evaluator_result": {"accept": true, "feedback": "The summary is grounded in the packet: both PRs modify `src/transformers/pipelines/base.py`, both target issue 43825, and both change the same `check_task` translation-validation path. The soft-edge accept is conservative enough because the diffs look like overlapping fixes for the same bug and could reasonably be combined into one PR. The canonical-pr comments are also supported by the packet, especially the presence of tests in PR 43849."}, "evaluator_used": true, "retried": false}, "soft_pairs": ["pull_request:43826|pull_request:43849"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 3312, "estimated_input_tokens": 1528, "item_count": 2, "node_count": 2, "serialized_chars": 6112, "soft_pair_count": 1}, "cached_at": "2026-04-14T14:18:58Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "436555799b33a43d73cc6e1189f5deac58dad41a23b4a0c564029497554eda30", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43830", "pull_request:45101"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "pull_request:43830 is the better representative of 
the cluster because it is already merged and contains a full model-family integration, but it is still not a duplicate of pull_request:45101.", "canonical_issue_reason": null, "canonical_pr_reason": "No true duplicate canonical PR here; the closest anchor would be pull_request:43830 because it is the merged, more complete Qwen3.5 support change, while pull_request:45101 is a distinct Nandi model addition.", "confidence": 0.95, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43830", "reason": "Different model families and different changesets (Qwen3.5 vs Nandi); shared auto files are generic plumbing, not evidence of the same code-path fix or mergeable duplicate PR.", "right": "pull_request:45101"}], "summary": "These are two separate model-support PRs that only overlap in shared auto-registration code; they do not appear to fix the same underlying feature or bug."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43830|pull_request:45101"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 3998, "estimated_input_tokens": 1871, "item_count": 3, "node_count": 3, "serialized_chars": 7481, "soft_pair_count": 3}, "cached_at": "2026-04-14T14:19:55Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "9621a4af870942adf21db33dc46019aa25ed42585e641c49df293fc36f4b8706", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43831", "pull_request:43832", "pull_request:43850"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 43831 is the best cluster representative because it actually merged, addresses the shared issue directly, and its diff 
matches the duplicate PRs closely.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 43831 is the merged, earliest, and most clearly established version of the same docs cleanup, with explicit issue linkage and inbound references.", "confidence": 0.98, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:43831", "reason": "Same issue target, same filenames, and effectively the same docs removal in the same task guides; this is a duplicate/alternate PR for the same change.", "right": "pull_request:43832"}, {"accept": true, "left": "pull_request:43831", "reason": "Same underlying docs cleanup: remove the same stale `pipeline()` examples from the same pages, with the same issue target.", "right": "pull_request:43850"}, {"accept": true, "left": "pull_request:43832", "reason": "Both PRs make the same docs-only removal in the same two files and appear to be parallel attempts at the same change.", "right": "pull_request:43850"}], "summary": "All three pull requests are near-identical docs-only removals of the same deprecated `pipeline()` examples in the same task pages, all targeting the same issue (#43827). They look like alternate/duplicate implementations of one change, with 43831 being the merged canonical PR."}, "error_kind": null, "error_message": null, "evaluator_result": {"accept": true, "feedback": "The summary is well grounded in the packet: all three PRs target the same issue (#43827), touch the same two files, and the diffs show the same docs-only removal. 43831 being the merged canonical PR is supported by the metadata. 
The pairwise acceptances are conservative and reasonable for alternate implementations of the same bugfix."}, "evaluator_used": true, "retried": false}, "soft_pairs": ["pull_request:43831|pull_request:43850", "pull_request:43832|pull_request:43850", "pull_request:43831|pull_request:43832"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2208, "estimated_input_tokens": 976, "item_count": 2, "node_count": 2, "serialized_chars": 3901, "soft_pair_count": 1}, "cached_at": "2026-04-14T14:20:38Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "18816693541b54adb57aec8104c426a34d29bc644de7fbbb4feaf4813b96e2c0", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43833", "pull_request:43839"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #43839 is the best representative because it is the completed/merged fix for the bug; #43833 is an open alternate patch for the same problem.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #43839 is the canonical PR: it is merged, targets the same issue, and fixes the same grouped_mm autocast dtype mismatch in the relevant code path.", "confidence": 0.96, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:43833", "reason": "Same underlying bug in `_grouped_linear`/`torch._grouped_mm` under autocast, same file, same issue target (#43828). The two PRs differ in implementation detail, but both fix the same concrete dtype-mismatch problem.", "right": "pull_request:43839"}], "summary": "Both pull requests address the same autocast/dtype mismatch in `src/transformers/integrations/moe.py` for `torch._grouped_mm`, and both are explicitly tied to issue #43828. 
PR #43839 is the stronger canonical PR because it is the merged fix; #43833 appears to be an earlier alternate implementation of the same bugfix."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43833|pull_request:43839"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2032, "estimated_input_tokens": 888, "item_count": 2, "node_count": 2, "serialized_chars": 3549, "soft_pair_count": 1}, "cached_at": "2026-04-14T14:20:56Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "70dc638141fe9e57e9832ed4db979f9c30a44c1f2b7e6cc7def6a489b66724bf", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43871", "pull_request:43966"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 43966 is the best representative of the cluster because it is merged, explicitly targets the same issue, and implements the same fix in the relevant code path.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 43966 is the better canonical PR because it is the later, merged fix for the same defect and includes the same core change with clearer explanation.", "confidence": 0.98, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:43871", "reason": "Same underlying bug in `dot_natural_key`, same file, same issue target, and nearly identical fix; they are effectively duplicate PRs for the same change.", "right": "pull_request:43966"}], "summary": "Both pull requests fix the same bug in `dot_natural_key` in `src/transformers/core_model_loading.py`: mixed string/int sort-key comparisons during state-dict loading. 
They target the same issue and make essentially the same code-path change, so they are duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43871|pull_request:43966"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2580, "estimated_input_tokens": 1162, "item_count": 2, "node_count": 2, "serialized_chars": 4646, "soft_pair_count": 1}, "cached_at": "2026-04-14T14:21:19Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "8d71f954769ac7b7fabb3a0a0490fc68a1aa72305ddea09fec45589a84ff2e4f", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43876", "pull_request:43877"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "Neither PR is a strong global representative for this cluster because the changes are unrelated. 43876 is marginally cleaner as a representative only because it is narrowly scoped and clearly tied to one config fix.", "canonical_issue_reason": null, "canonical_pr_reason": "No single canonical PR emerges from the pair because they touch different model families and different code paths. If one had to be chosen, 43876 is slightly more self-contained, but it is not a meaningful cluster representative for the other PR.", "confidence": 0.98, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43876", "reason": "Different subsystems and bug classes: GLM MoE config default handling versus fast image preprocessing/patch counting. 
They do not appear mergeable into one PR or fix the same concrete issue.", "right": "pull_request:43877"}], "summary": "These two pull requests are unrelated: one fixes a GLM MoE DSA config default in modular conversion, while the other restores a missing fast image patch counter in a GLM/Ernie vision processor path. They do not target the same underlying bug or change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43876|pull_request:43877"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2358, "estimated_input_tokens": 1051, "item_count": 2, "node_count": 2, "serialized_chars": 4202, "soft_pair_count": 1}, "cached_at": "2026-04-14T14:21:46Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "7ad56291a6ae189575bfd8ef72772b72c73bfb37a9509140a49ea8c96d4b0f22", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43890", "pull_request:43891"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "43890 is the stronger representative only by being a focused regression guard; 43891 is a separate RAG test extension and does not match the same underlying problem.", "canonical_issue_reason": null, "canonical_pr_reason": "No issue artifacts are present. If a PR must represent the cluster, 43890 is the more clearly regression-focused, single-purpose change, but the pair is not actually duplicate-worthy.", "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43890", "reason": "Different subsystems and different goals: BART memory regression test vs. RAG retriever smoke coverage. 
They do not fix the same concrete bug or code path, and would not plausibly be merged into one PR.", "right": "pull_request:43891"}], "summary": "These are two unrelated test-only pull requests in different model areas: BART memory regression coverage vs. RAG retriever smoke coverage. They share only broad testing intent, not the same bug or code path."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43890|pull_request:43891"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2694, "estimated_input_tokens": 1219, "item_count": 2, "node_count": 2, "serialized_chars": 4873, "soft_pair_count": 1}, "cached_at": "2026-04-14T14:22:00Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "8372c7c801e1d86a619316af99b73cb24117c60d8a4dbcf79f6d2b92f1307d5b", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43925", "pull_request:43941"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #43941 best represents the cluster\u2019s underlying theme of Windows encoding failures in Python file I/O, whereas #43925 is a narrower infrastructure-only fix.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #43941 is the stronger canonical representative because it is the broader, user-facing Windows UnicodeDecodeError fix across multiple examples and has the most explicit problem statement.", "confidence": 0.93, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43925", "reason": "Same general encoding theme, but they touch different subsystems and concrete file-write paths; they are not the same underlying bug or a single mergeable PR.", 
"right": "pull_request:43941"}], "summary": "Both PRs address Windows text-encoding problems, but they fix different code paths: one updates CircleCI helper scripts, the other patches several PyTorch example scripts. They are related by theme, not duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43925|pull_request:43941"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2418, "estimated_input_tokens": 1081, "item_count": 2, "node_count": 2, "serialized_chars": 4321, "soft_pair_count": 1}, "cached_at": "2026-04-14T14:22:30Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "f4e82b8a48bc34bd2144703329a04b7f495bd41e1f0e20927893c347b6ff891c", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43928", "pull_request:44362"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "No best PR for duplicate triage; neither PR is a duplicate of the other, so there is no single canonical representative for the pair.", "canonical_issue_reason": null, "canonical_pr_reason": "No canonical PR: these PRs address different bugs in different modules and do not appear to be mergeable into a single change.", "confidence": 0.98, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43928", "reason": "Different code paths and failure modes: Dia config save/load token IDs vs MLuke tokenizer AttributeError. 
No shared files, targets, or concrete bug.", "right": "pull_request:44362"}], "summary": "The two pull requests are unrelated and should not be clustered as duplicates: one fixes DiaConfig token ID persistence, the other fixes an MLuke tokenizer AttributeError after a refactor."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43928|pull_request:44362"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 3392, "estimated_input_tokens": 1568, "item_count": 3, "node_count": 3, "serialized_chars": 6270, "soft_pair_count": 2}, "cached_at": "2026-04-14T14:23:26Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "26cdcf0dff471c85a9bfe4ae79582050c00f4a600a187abb9a95e29f46434f73", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43934", "pull_request:43958", "pull_request:44480"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "#43958 is the closest representative only because it is a Dockerfile dependency update like #44480, but it still fixes a different package/install need and should not subsume the others.", "canonical_issue_reason": null, "canonical_pr_reason": "No single canonical PR: the cluster mixes an unrelated quantization feature PR with two different Dockerfile dependency tweaks.", "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43934", "reason": "Different work entirely: Metal quantization feature/docs versus a CI Dockerfile change to install kernels.", "right": "pull_request:43958"}, {"accept": false, "left": "pull_request:43958", "reason": "Both touch CI Dockerfiles, but they add different 
dependencies for different reasons (kernels for quantization tests vs diffusers for VibeVoice), so they are not the same change.", "right": "pull_request:44480"}], "summary": "These PRs are not duplicates. #43934 is a Metal quantization feature/docs change, while #43958 and #44480 are separate Dockerfile dependency updates for different CI needs. The two Dockerfile PRs are only superficially similar."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43934|pull_request:43958", "pull_request:43958|pull_request:44480"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 3220, "estimated_input_tokens": 1482, "item_count": 2, "node_count": 2, "serialized_chars": 5926, "soft_pair_count": 1}, "cached_at": "2026-04-14T14:23:46Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "03556185fbe58ee0ac6d1377b5be5355eaa0c18aab0728d42b460c66ae3c5cba", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44006", "pull_request:44069"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "Neither PR is a clear duplicate of the other. 
If one must represent the cluster, 44069 is the broader stability sweep, but it does not subsume 44006.", "canonical_issue_reason": null, "canonical_pr_reason": "No single canonical PR: the two PRs fix different bugs and should remain separate rather than being deduplicated.", "confidence": 0.93, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44006", "reason": "Both are numerical-stability related, but they modify different formulas/checks in different model families and would not plausibly be merged into one concrete fix.", "right": "pull_request:44069"}], "summary": "Two merged PRs that both address numerical-stability cleanups, but in different code paths and with different fixes: one replaces log-based perplexity with torch.xlogy in speech quantizers; the other replaces inf/nan checks with torch.isfinite across many models. They are not the same underlying change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44006|pull_request:44069"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 4068, "estimated_input_tokens": 1906, "item_count": 3, "node_count": 3, "serialized_chars": 7624, "soft_pair_count": 3}, "cached_at": "2026-04-14T14:24:57Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "1d3c098802d6eaaf18625724084795a6bcc189d77ef02269d0ad39ef8050e3a0", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44058", "pull_request:44097", "pull_request:44103"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 44103 is the best representative duplicate target because it addresses the core model bug directly and with the 
least unrelated surface area.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 44103 is the clearest direct fix for the cache_position handling bug in glm_moe_dsa; it targets the concrete forward/caching code path and is narrower than 44058\u2019s more mixed test-related patch.", "confidence": 0.83, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:44058", "reason": "Both PRs patch the same glm_moe_dsa cache_position / key-cache handling bug in the model forward path and touch the same model files, so they look like duplicate fixes that could plausibly be merged into one PR.", "right": "pull_request:44103"}, {"accept": false, "left": "pull_request:44058", "reason": "44097 is a test refactor/cleanup around keep_in_fp32_modules, not the same concrete code-path bug as 44058\u2019s cache-management fix; shared issue target and test file overlap are not enough.", "right": "pull_request:44097"}, {"accept": false, "left": "pull_request:44097", "reason": "44097 only merges/renames fp32-module tests, while 44103 fixes glm_moe_dsa caching behavior; they are different changes despite both mentioning the same model area and issue target.", "right": "pull_request:44103"}], "summary": "Two PRs (44058 and 44103) appear to fix the same glm_moe_dsa cache-position/cache-management bug; 44097 is a separate test cleanup/merge and not the same underlying change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": true, "retried": false}, "soft_pairs": ["pull_request:44058|pull_request:44103", "pull_request:44058|pull_request:44097", "pull_request:44097|pull_request:44103"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2442, "estimated_input_tokens": 1093, "item_count": 2, "node_count": 2, "serialized_chars": 4370, "soft_pair_count": 1}, "cached_at": "2026-04-14T14:25:24Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", 
"evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "c063131231ffd6fffc2cd9b6bc554bb954f33db4a78706e60a7248804bb41611", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44064", "pull_request:44124"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #44124 is the most representative artifact in the cluster because it cleanly implements the final-evaluation feature; the other PR is not the same concrete change.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #44124 directly adds `eval_on_end` in `TrainingArguments` and triggers final evaluation at the end of training, matching the feature described in its title and diff.", "confidence": 0.92, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44064", "reason": "They overlap in Trainer files, but the diffs point to different changes: #44124 adds end-of-training evaluation, while #44064 shows a separate training-loop/push_to_hub refactor and does not clearly implement the same code-path fix.", "right": "pull_request:44124"}], "summary": "PR #44124 is the clear eval_on_end implementation; PR #44064 appears to be a different Trainer training-loop refactor despite overlapping files and a similar title. 
I would not merge them as duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44064|pull_request:44124"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 13658, "estimated_input_tokens": 6701, "item_count": 10, "node_count": 10, "serialized_chars": 26803, "soft_pair_count": 24}, "cached_at": "2026-04-14T14:26:31Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "79382568728c4dfee3eaa3697f5b4870fe6065bb4a03257a58300bbbf5d28787", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44107", "pull_request:44108", "pull_request:44109", "pull_request:44110", "pull_request:44111", "pull_request:44133", "pull_request:44134", "pull_request:44135", "pull_request:44136", "pull_request:44137"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "44136 is the strongest overall representative because it has the broadest scoped refactor among the near-duplicates and includes the vitdet change plus a related companion file, but it should only represent the vitdet/lw_detr subcluster rather than the whole set.", "canonical_issue_reason": null, "canonical_pr_reason": "No single global canonical PR across the full set; the items split into separate model-specific refactor subclusters. 
If a representative is needed, 44136 is the broadest vitdet-related example and subsumes the narrower 44108 vitdet-only change.", "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44109", "reason": "Different models/code paths (HGNetV2 vs ViTDet); same refactor pattern, but not the same underlying change.", "right": "pull_request:44136"}, {"accept": true, "left": "pull_request:44108", "reason": "Same ViTDet output-tracing refactor on the same file; 44136 is a superset-style follow-on with the same concrete change.", "right": "pull_request:44136"}, {"accept": true, "left": "pull_request:44107", "reason": "Same MRA file and same output-tracing decorator refactor; clearly the same underlying change.", "right": "pull_request:44137"}, {"accept": true, "left": "pull_request:44110", "reason": "Same TVP file and same capture_outputs/output-tracing migration; same concrete refactor.", "right": "pull_request:44134"}, {"accept": true, "left": "pull_request:44109", "reason": "Same HGNetV2 refactor in the same implementation files; same underlying output-tracing change.", "right": "pull_request:44135"}, {"accept": true, "left": "pull_request:44111", "reason": "Same PoolFormer file and same output-tracing/capture_outputs refactor; same change.", "right": "pull_request:44133"}, {"accept": false, "left": "pull_request:44136", "reason": "Different model families (ViTDet/lw_detr vs MRA); same broad refactor theme but unrelated code paths.", "right": "pull_request:44137"}, {"accept": false, "left": "pull_request:44107", "reason": "Different models (MRA vs ViTDet/lw_detr); not the same concrete bug or change.", "right": "pull_request:44136"}, {"accept": false, "left": "pull_request:44107", "reason": "Different model implementations (MRA vs ViTDet); shared refactor style only, not a duplicate change.", "right": "pull_request:44108"}, {"accept": false, "left": "pull_request:44133", "reason": "PoolFormer vs ViTDet/lw_detr are unrelated model code paths; 
same decorator migration pattern, but not the same change.", "right": "pull_request:44136"}, {"accept": false, "left": "pull_request:44134", "reason": "TVP vs ViTDet/lw_detr are different model-specific refactors; reject as non-duplicates.", "right": "pull_request:44136"}, {"accept": false, "left": "pull_request:44108", "reason": "ViTDet and MRA are different model files and code paths; only the refactor theme matches.", "right": "pull_request:44137"}, {"accept": false, "left": "pull_request:44108", "reason": "ViTDet vs PoolFormer are unrelated implementations; same tracing refactor pattern, not the same underlying change.", "right": "pull_request:44111"}, {"accept": false, "left": "pull_request:44108", "reason": "ViTDet vs TVP are different model code paths; not a duplicate.", "right": "pull_request:44110"}, {"accept": false, "left": "pull_request:44133", "reason": "PoolFormer vs MRA are separate model implementations; same style of refactor only.", "right": "pull_request:44137"}, {"accept": false, "left": "pull_request:44134", "reason": "TVP vs MRA are unrelated model files; not the same concrete bug/change.", "right": "pull_request:44137"}, {"accept": false, "left": "pull_request:44133", "reason": "PoolFormer and TVP are different model-specific output-tracing refactors, not one duplicate change.", "right": "pull_request:44134"}, {"accept": false, "left": "pull_request:44107", "reason": "MRA vs PoolFormer are different model code paths; reject despite similar decorator migration.", "right": "pull_request:44111"}, {"accept": false, "left": "pull_request:44107", "reason": "MRA vs TVP are unrelated model refactors; same theme only.", "right": "pull_request:44110"}, {"accept": false, "left": "pull_request:44108", "reason": "ViTDet and TVP are separate model files with different code paths; not duplicates.", "right": "pull_request:44134"}, {"accept": false, "left": "pull_request:44133", "reason": "PoolFormer vs HGNetV2 are different models; same output-tracing 
pattern is not enough to merge.", "right": "pull_request:44135"}, {"accept": false, "left": "pull_request:44110", "reason": "TVP vs PoolFormer are distinct model implementations; no shared concrete change.", "right": "pull_request:44111"}, {"accept": false, "left": "pull_request:44110", "reason": "TVP is unrelated to the ViTDet/lw_detr refactor; same broad migration pattern, different code path.", "right": "pull_request:44136"}, {"accept": false, "left": "pull_request:44110", "reason": "TVP vs PoolFormer are different model-specific changes, not a duplicate.", "right": "pull_request:44133"}], "summary": "This cluster is mostly a set of parallel model-specific refactors to output tracing/capture decorators, not one single duplicate change. Duplicate matches are only valid within the same model/file family; cross-model pairs should be rejected."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44109|pull_request:44136", "pull_request:44108|pull_request:44136", "pull_request:44107|pull_request:44137", "pull_request:44110|pull_request:44134", "pull_request:44109|pull_request:44135", "pull_request:44111|pull_request:44133", "pull_request:44136|pull_request:44137", "pull_request:44107|pull_request:44136", "pull_request:44107|pull_request:44108", "pull_request:44133|pull_request:44136", "pull_request:44134|pull_request:44136", "pull_request:44108|pull_request:44137", "pull_request:44108|pull_request:44111", "pull_request:44108|pull_request:44110", "pull_request:44133|pull_request:44137", "pull_request:44134|pull_request:44137", "pull_request:44133|pull_request:44134", "pull_request:44107|pull_request:44111", "pull_request:44107|pull_request:44110", "pull_request:44108|pull_request:44134", "pull_request:44133|pull_request:44135", "pull_request:44110|pull_request:44111", "pull_request:44110|pull_request:44136", "pull_request:44110|pull_request:44133"], "split": true, "trimmed": 
false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 8192, "estimated_input_tokens": 3968, "item_count": 7, "node_count": 7, "serialized_chars": 15870, "soft_pair_count": 8}, "cached_at": "2026-04-14T14:27:23Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "0b2d5256e8627b0d5e76be92b27a33ec7d57c5bbaee32974474bec49fc98f0b3", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44108", "pull_request:44109", "pull_request:44111", "pull_request:44133", "pull_request:44134", "pull_request:44135", "pull_request:44136"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 44111 is the most suitable cluster representative because it is focused on one concrete code path, has a clean title, and matches the duplicate PoolFormer PR most closely.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 44111 is the best canonical representative: it is a self-contained single-model refactor, and it has the clearest exact duplicate in the cluster (44133). It is also less entangled than the multi-model variants.", "confidence": 0.89, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44135", "reason": "Same refactor theme, but different concrete scope: 44136 spans vitdet plus lw_detr, while 44135 is hgnet_v2 only. That is not the same underlying PR/change.", "right": "pull_request:44136"}, {"accept": false, "left": "pull_request:44111", "reason": "Both are output-tracing refactors, but they affect different model files and code paths (PoolFormer vs HGNetV2). Similar pattern is not enough to treat them as duplicates.", "right": "pull_request:44135"}, {"accept": false, "left": "pull_request:44109", "reason": "Different model implementations and files. 
They share a refactor style, not the same concrete bugfix/change.", "right": "pull_request:44111"}, {"accept": false, "left": "pull_request:44134", "reason": "TVP vs HGNetV2 are separate model code paths; these are parallel refactors, not duplicates.", "right": "pull_request:44135"}, {"accept": false, "left": "pull_request:44111", "reason": "PoolFormer and TVP are different model implementations. The shared capture_outputs pattern does not make them the same change.", "right": "pull_request:44134"}, {"accept": false, "left": "pull_request:44111", "reason": "44136 is a broader multi-model PR (vitdet plus lw_detr), so it is not the same concrete change as the PoolFormer-only refactor in 44111.", "right": "pull_request:44136"}, {"accept": false, "left": "pull_request:44109", "reason": "HGNetV2 and PoolFormer are unrelated concrete code paths; these are similar refactors across different models, not duplicates.", "right": "pull_request:44133"}, {"accept": false, "left": "pull_request:44108", "reason": "VitDet and HGNetV2 are distinct model implementations. The PRs share only the generic output-tracing refactor theme.", "right": "pull_request:44109"}], "summary": "These PRs are all variations of the same broad refactor pattern (\u201cuse output tracing / capture_outputs\u201d), but they target different model implementations, so most are not true duplicates. 
The only clear duplicate in the cluster is the PoolFormer pair (44111 vs 44133), which is not one of the provided soft-edge pairs."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44135|pull_request:44136", "pull_request:44111|pull_request:44135", "pull_request:44109|pull_request:44111", "pull_request:44134|pull_request:44135", "pull_request:44111|pull_request:44134", "pull_request:44111|pull_request:44136", "pull_request:44109|pull_request:44133", "pull_request:44108|pull_request:44109"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 3610, "estimated_input_tokens": 1677, "item_count": 2, "node_count": 2, "serialized_chars": 6706, "soft_pair_count": 1}, "cached_at": "2026-04-14T14:27:45Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "85be57b209742d22b7763d5f9df49b2ae561f25bad932709de5332099e85bdbe", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44130", "pull_request:44181"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #44181 is the best representative of the cluster because it centralizes the main cache_position removal work across cache_utils, masking, and model codepaths; #44130 is a smaller dependent cleanup in generation handling.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #44181 is the better canonical representative because it makes the broader core API change around removing cache positions, while #44130 is a narrower generation-side follow-up.\n", "confidence": 0.93, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44130", "reason": "Related refactor, but not the same concrete 
bug/change: #44130 removes cache_position reliance in generation input preparation and candidate generation, while #44181 removes cache positions from the core cache/masking APIs and many model backends. Overlap in filenames reflects the shared refactor, not a single mergeable PR.", "right": "pull_request:44181"}], "summary": "Both PRs are part of the broader cache_position removal refactor, but they target different layers and concrete changes: one is generation input-prep/candidate generation, the other is the core cache/masking API and many model implementations. They are related, but not the same underlying change and should not be merged as duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44130|pull_request:44181"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 5552, "estimated_input_tokens": 2648, "item_count": 4, "node_count": 4, "serialized_chars": 10592, "soft_pair_count": 6}, "cached_at": "2026-04-14T14:28:14Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "8d9b638e8717c0dca6649bf8ca3acfdaed07637ec1dddb5e5e9388e87fb57383", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44182", "pull_request:44211", "pull_request:44216", "pull_request:44249"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "44249 is the best global PR to anchor the cluster: it addresses the underlying behavior change in the core pipeline implementation, not just documentation, and it is the clearest, most complete fix for the `grouped_entities` compatibility problem.", "canonical_issue_reason": null, "canonical_pr_reason": "44249 is the best 
representative PR because it directly changes the runtime token-classification code path to support deprecated `grouped_entities` with explicit validation and mapping to `aggregation_strategy`. It is more complete and concrete than the docs-focused 44182, and more precise than the simpler compatibility variant in 44211.", "confidence": 0.94, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:44211", "reason": "Both PRs implement backward compatibility for the deprecated `grouped_entities` parameter in `token_classification.py`, mapping it to `aggregation_strategy` in the same code path. They are close enough to be merged into one fix.", "right": "pull_request:44249"}, {"accept": false, "left": "pull_request:44182", "reason": "44182 is primarily docs/migration cleanup removing references to `grouped_entities`; 44249 is a runtime compatibility fix. Same file target is not enough to make them the same change.", "right": "pull_request:44249"}, {"accept": false, "left": "pull_request:44211", "reason": "44216 is mis-titled and does not clearly show the same concrete change; sharing the issue target and file alone is insufficient to treat it as the same fix.", "right": "pull_request:44216"}, {"accept": false, "left": "pull_request:44216", "reason": "44216 does not present the same implementation detail or behavior change as 44249; the overlap is only in the touched file and issue reference.", "right": "pull_request:44249"}, {"accept": false, "left": "pull_request:44182", "reason": "44182 removes old docs references, while 44211 adds deprecation compatibility logic in code. 
These are related but distinct changes.", "right": "pull_request:44211"}, {"accept": false, "left": "pull_request:44182", "reason": "44182 is documentation cleanup; 44216 is not clearly the same code-path fix and appears unrelated beyond the shared issue target.", "right": "pull_request:44216"}], "summary": "This cluster is centered on the deprecated `grouped_entities` parameter in token-classification. Two PRs are genuine code-level backward-compatibility fixes, one PR is mostly documentation cleanup, and one appears mis-titled/unrelated. The strongest duplicate pair is 44211 vs 44249."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44211|pull_request:44249", "pull_request:44182|pull_request:44249", "pull_request:44211|pull_request:44216", "pull_request:44216|pull_request:44249", "pull_request:44182|pull_request:44211", "pull_request:44182|pull_request:44216"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2556, "estimated_input_tokens": 1150, "item_count": 2, "node_count": 2, "serialized_chars": 4598, "soft_pair_count": 1}, "cached_at": "2026-04-14T14:29:32Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "bfd748b35a56dacfbd79a64b39e59fd6c244fd35c520d67a0e31a8a63b4851db", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44207", "pull_request:44210"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #44207 is the best representative because it is the merged implementation of the regression fix and most fully reflects the underlying change, while #44210 is just a subset of the same adjustment.", "canonical_issue_reason": null, 
"canonical_pr_reason": "PR #44207 is the stronger canonical PR: it is merged, explicitly targets issue #44206, and appears to carry the complete fix across the related LASR/Voxtral feature-extractor codepaths. PR #44210 is a narrower unmerged variant of the same bugfix.", "confidence": 0.98, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:44207", "reason": "Both PRs remove the same invalid `center` argument from LASR fbank extraction for the same issue #44206; they address the same underlying bug and could plausibly have been merged as one PR.", "right": "pull_request:44210"}], "summary": "Two PRs target the same LASR regression caused by an invalid `center` argument in fbank feature extraction. PR #44207 is the merged, broader fix; PR #44210 is a narrower unmerged duplicate of the same code-path change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44207|pull_request:44210"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 1848, "estimated_input_tokens": 796, "item_count": 2, "node_count": 2, "serialized_chars": 3183, "soft_pair_count": 1}, "cached_at": "2026-04-14T14:29:49Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "ed60dc56a2975a29d3e219c9a9218d6d48dfa9adf06a9ed7bbe900961eb78cf8", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44218", "pull_request:44219"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "No global best PR for deduplication: both are tiny, independent docs fixes and neither is a stronger representative of the other.", "canonical_issue_reason": null, "canonical_pr_reason": "No canonical PR 
here: 44218 and 44219 fix separate typos in separate documentation files, so neither subsumes the other.", "confidence": 0.95, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44218", "reason": "Different files and different typo fixes ('sentencepiece' install command vs 'docstring' wording). They share only a broad docs-maintenance theme, not the same underlying change.", "right": "pull_request:44219"}], "summary": "Two small merged documentation PRs in Italian, each fixing a different typo in a different doc page. They are loosely similar as doc-only edits, but not duplicates of the same change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44218|pull_request:44219"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6112, "estimated_input_tokens": 2928, "item_count": 4, "node_count": 4, "serialized_chars": 11712, "soft_pair_count": 6}, "cached_at": "2026-04-14T14:31:53Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "1c0c5407bc96e9cdb60a06f1126dc194206015246572e4f85607c3c143f4ebf3", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44243", "pull_request:44274", "pull_request:44284", "pull_request:44586"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #44243 is the best single representative: smallest scope, clear title, explicit issue link, and a direct code-path fix for the reported Mixtral bug.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #44243 is the most focused fix for the exact Mixtral gating bug in the main modeling file, directly tied to issue #44242 without additional broad-sweep 
changes.", "confidence": 0.84, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44243", "reason": "#44586 is a broad cross-model MoE sweep touching many files and model families; it is not the same concrete Mixtral-only change as #44243, even though both mention the same issue.", "right": "pull_request:44586"}, {"accept": true, "left": "pull_request:44274", "reason": "Both PRs target the same Mixtral aux-loss bug, modify the same modular Mixtral path, and include the same supporting test/repro structure; they look like duplicate implementations of the same fix.", "right": "pull_request:44284"}, {"accept": false, "left": "pull_request:44284", "reason": "#44586 is a much broader multi-model refactor/fix, so it should not be merged with the narrower modular-Mixtral-only PR #44284 as a duplicate.", "right": "pull_request:44586"}, {"accept": false, "left": "pull_request:44274", "reason": "#44586 expands to many unrelated MoE model files; that goes beyond the same concrete Mixtral code-path change fixed by #44274.", "right": "pull_request:44586"}, {"accept": true, "left": "pull_request:44243", "reason": "Both PRs address the same underlying Mixtral `output_router_logits=False` aux-loss bug, just in different Mixtral implementation files; they are plausible duplicate fixes for the same behavior.", "right": "pull_request:44274"}, {"accept": true, "left": "pull_request:44243", "reason": "#44284 is another fix for the same Mixtral aux-loss gating issue, and its scope overlaps the same behavioral change as #44243, making them duplicates at the bug-fix level.", "right": "pull_request:44284"}], "summary": "These are all variants of the same Mixtral auxiliary/load-balancing loss bug around `output_router_logits=False`, except PR #44586 which broadens the change across many MoE model families. 
The two modular-Mixtral PRs are near-duplicates of each other; the targeted Mixtral-only PR is the cleanest representative of the cluster."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44243|pull_request:44586", "pull_request:44274|pull_request:44284", "pull_request:44284|pull_request:44586", "pull_request:44274|pull_request:44586", "pull_request:44243|pull_request:44274", "pull_request:44243|pull_request:44284"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2292, "estimated_input_tokens": 1018, "item_count": 2, "node_count": 2, "serialized_chars": 4070, "soft_pair_count": 1}, "cached_at": "2026-04-14T14:32:41Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "17150cbff71c252cd7623154408203ccaab7acc55c368227994220babf7d3df8", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44254", "pull_request:44294"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #44254 is the best representative of the cluster since it targets the underlying fast-tokenizer overwrite behavior rather than a model-specific routing workaround.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #44254 is the more canonical fix because it directly addresses the tokenizer initialization bug that overwrote a custom `pre_tokenizer` from `tokenizer.json` in the implementation itself.", "confidence": 0.91, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44254", "reason": "Related theme, but different fixes and different code paths: #44254 changes GPT-2 tokenizer initialization to stop overwriting a custom pre-tokenizer, while #44294 only 
remaps Olmo3 to `TokenizersBackend`. They are not the same underlying change and would not plausibly merge as one PR.", "right": "pull_request:44294"}], "summary": "Two PRs share a theme around preserving custom fast-tokenizer pre-tokenizers, but they fix different code paths: one patches GPT-2 tokenizer construction, the other changes Olmo3\u2019s auto-mapping to a different backend. Not a duplicate PR pair."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44254|pull_request:44294"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 9100, "estimated_input_tokens": 4422, "item_count": 7, "node_count": 7, "serialized_chars": 17686, "soft_pair_count": 7}, "cached_at": "2026-04-14T14:33:25Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "dc778ef5e560e35c1ca2656ca0f98f887d28a2b34df806b42ba632cbb9ab024f", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44267", "pull_request:44323", "pull_request:44324", "pull_request:44326", "pull_request:44787", "pull_request:44788", "pull_request:44807"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 44323 best anchors the cluster because it is the central, issue-linked document-QA tutorial change; the other PRs are separate docs cleanups or unrelated example edits.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 44323 is the clearest representative of the duplicate set: it has the explicit issue link, the core document-question-answering tutorial addition, and PR 44324 is effectively the same change.", "confidence": 0.91, "soft_edge_verdicts": [{"accept": false, "left": 
"pull_request:44267", "reason": "Same general docs theme, but 44267 only updates the pipeline docstring while 44323 adds a tutorial example and CONTRIBUTING/docs links. Not the same concrete change.", "right": "pull_request:44323"}, {"accept": true, "left": "pull_request:44323", "reason": "They have the same title, same files, and the same explicit issue target (18926); this looks like the same PR/change duplicated.", "right": "pull_request:44324"}, {"accept": false, "left": "pull_request:44323", "reason": "Both touch pipeline_tutorial.md, but 44323 adds a document-QA example while 44326 adds a zero-shot-classification example. Different task, different change.", "right": "pull_request:44326"}, {"accept": false, "left": "pull_request:44323", "reason": "44323 adds a document-QA tutorial example; 44788 removes an outdated question-answering example from pipeline API docs. Related topic, but not the same fix.", "right": "pull_request:44788"}, {"accept": false, "left": "pull_request:44324", "reason": "Same as 44323 vs 44326: one is document QA, the other is zero-shot classification. Shared file alone is not enough.", "right": "pull_request:44326"}, {"accept": false, "left": "pull_request:44787", "reason": "Both remove question-answering references from docs, but they target different documentation pages/sections and language sets. Related cleanup, not a duplicate PR.", "right": "pull_request:44807"}, {"accept": false, "left": "pull_request:44788", "reason": "Both are docs cleanup around question-answering, but one removes an example from pipeline docs and the other removes quicktour table entries. Distinct concrete edits.", "right": "pull_request:44807"}], "summary": "One true duplicate pair exists (44323 and 44324). 
The rest are loosely related documentation PRs about question-answering / document QA, but they change different pages or even different example tasks, so they should stay separate."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44267|pull_request:44323", "pull_request:44323|pull_request:44324", "pull_request:44323|pull_request:44326", "pull_request:44323|pull_request:44788", "pull_request:44324|pull_request:44326", "pull_request:44787|pull_request:44807", "pull_request:44788|pull_request:44807"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 5856, "estimated_input_tokens": 2800, "item_count": 4, "node_count": 4, "serialized_chars": 11200, "soft_pair_count": 6}, "cached_at": "2026-04-14T14:34:30Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "ae284be0cfed97cdeeab66a89404f86535e126fcc0b8007de582f8535abc58fc", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44289", "pull_request:44312", "pull_request:44725", "pull_request:44809"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "44289 is the best representative PR for the duplicate cluster because it is the earliest exact match and is narrowly scoped to one concrete code change.", "canonical_issue_reason": null, "canonical_pr_reason": "44289 is the earliest and cleanest instance of the identical FlexOlmo/Olmo2/Olmo3 bare-except replacement; 44312 appears to be a duplicate reopening with the same diff and title.", "confidence": 0.94, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:44289", "reason": "Same title, same files, and same diff replacing bare except clauses 
with except Exception; clearly the same change.", "right": "pull_request:44312"}, {"accept": false, "left": "pull_request:44289", "reason": "Different underlying changes: 44289 fixes bare excepts in FlexOlmo/Olmo2/Olmo3, while 44725 changes flex_attention and several unrelated model/config files.", "right": "pull_request:44725"}, {"accept": false, "left": "pull_request:44289", "reason": "Different code paths and scope: 44289 is a multi-file bare-except cleanup, while 44809 is focused on Fuyu image processing plus unrelated XLNet edits.", "right": "pull_request:44809"}, {"accept": false, "left": "pull_request:44312", "reason": "44312 is the same as 44289, but 44725 is a different broad flex_attention change; not the same concrete fix.", "right": "pull_request:44725"}, {"accept": false, "left": "pull_request:44312", "reason": "44312 matches 44289\u2019s bare-except cleanup, whereas 44809 addresses different files and a different fix path.", "right": "pull_request:44809"}, {"accept": false, "left": "pull_request:44725", "reason": "Same generic title pattern, but the diffs target different functionality and unrelated files; not mergeable as one PR.", "right": "pull_request:44809"}], "summary": "Two distinct duplicate groups are present: PRs 44289 and 44312 are the same change; the Fuyu-related PRs are not duplicates of the Flex/Olmo bare-except fix, and 44725 and 44809 are also not the same underlying change despite similar titles."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44289|pull_request:44312", "pull_request:44289|pull_request:44725", "pull_request:44289|pull_request:44809", "pull_request:44312|pull_request:44725", "pull_request:44312|pull_request:44809", "pull_request:44725|pull_request:44809"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2324, "estimated_input_tokens": 1034, "item_count": 2, 
"node_count": 2, "serialized_chars": 4135, "soft_pair_count": 1}, "cached_at": "2026-04-14T14:34:54Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "bcebc6097ad28e8775061757947bff7ed40bc79314ca41e360d543f0762b616e", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44316", "pull_request:44345"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 44345 is the best representative of the fix because it solves the concrete user-facing problem in a precise way (`isatty()` guard) rather than only reducing update chatter.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 44345 is the stronger canonical PR: it directly addresses the CI/non-TTY log spam case, cites the tracking issue, and includes the more targeted behavior change plus tests.", "confidence": 0.88, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:44316", "reason": "Both modify the same weight-loading progress bar code path to reduce verbosity for the same underlying issue, and the two changes could plausibly be merged into one PR.", "right": "pull_request:44345"}], "summary": "Both PRs target the same weight-loading tqdm verbosity problem in `core_model_loading.py`, with 44345 being the more complete fix by gating the postfix on non-TTY output. 
They are close enough to treat as duplicates/alternate fixes for the same underlying change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44316|pull_request:44345"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 4154, "estimated_input_tokens": 1949, "item_count": 3, "node_count": 3, "serialized_chars": 7796, "soft_pair_count": 3}, "cached_at": "2026-04-14T14:35:27Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "b8de54a38aa103a82bae3116bd3467790cde6d444738edc9785050f6ad7eea99", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44317", "pull_request:44346", "pull_request:44585"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #44346 is the best representative of the core bug/fix in this cluster; #44317 is effectively the same patch with a less precise title, while #44585 is broader and spans additional files/models beyond the exact V2 fix.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #44346 is the cleanest canonical representative: it has the clearest title, targets the exact DeepseekV2 MLA layernorm epsilon omission, and matches the minimal fix shown in the diff.", "confidence": 0.9, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:44317", "reason": "Same DeepseekV2 code path, same issue target, and essentially identical diff: passing rms_norm_eps into q/kv layernorms.", "right": "pull_request:44346"}, {"accept": false, "left": "pull_request:44317", "reason": "Related bug pattern, but #44585 is DeepseekV3-specific and also touches other model files; not the same concrete change/PR.", "right": 
"pull_request:44585"}, {"accept": false, "left": "pull_request:44346", "reason": "Same general theme, but one is a minimal DeepseekV2 fix and the other is a broader DeepseekV3/multi-file change, so they are not duplicate PRs.", "right": "pull_request:44585"}], "summary": "Two PRs are near-exact duplicates for the DeepseekV2 rms_norm_eps fix. The DeepseekV3 PR is related by theme but is a separate, broader change and should not be merged as a duplicate with the V2-only fix."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44317|pull_request:44346", "pull_request:44317|pull_request:44585", "pull_request:44346|pull_request:44585"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2372, "estimated_input_tokens": 1058, "item_count": 2, "node_count": 2, "serialized_chars": 4229, "soft_pair_count": 1}, "cached_at": "2026-04-14T14:35:57Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "cc6b6722042c2f4d370f54c7884fe3ee9adaeac12a9cc503c943cd60604d4235", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44332", "pull_request:45269"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 44332 is the strongest standalone PR here: merged, complete, and slightly broader in scope than the open typo-only PR.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 44332 is the better cluster representative because it is already merged and reflects a completed typo/docstring cleanup, but it is still a distinct change from 45269.", "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44332", "reason": "Different typo fixes 
in different files; no shared concrete bug or code-path change, so they are not duplicates.", "right": "pull_request:45269"}], "summary": "These are two separate typo-fix PRs in different files with different concrete edits; they are not the same underlying change and should not be merged as duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44332|pull_request:45269"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2470, "estimated_input_tokens": 1107, "item_count": 2, "node_count": 2, "serialized_chars": 4426, "soft_pair_count": 1}, "cached_at": "2026-04-14T14:36:24Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "de7f27525256acd82e3ba0dbfbede6342b2fedeb7f5c1c397b8c5b14f120ca8f", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44344", "pull_request:44427"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 44427 is the best standalone PR in this cluster: it targets the underlying tokenizer_class persistence behavior directly and broadly.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 44427 is the stronger canonical representative because it addresses the core save_pretrained/from_pretrained path generically and includes tests, whereas 44344 is a narrower model-specific mapping fix.", "confidence": 0.89, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44344", "reason": "Do not merge: 44344 patches Qwen3.5 auto-tokenizer class mapping, while 44427 changes tokenizer load/save behavior to preserve tokenizer_class. 
Different code paths and not one mergeable fix.", "right": "pull_request:44427"}], "summary": "Both PRs reference the same upstream issue, but they fix different layers: one changes Qwen3.5 auto-tokenizer mapping, the other preserves tokenizer_class during load/save. They are related, not the same concrete change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44344|pull_request:44427"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 3706, "estimated_input_tokens": 1725, "item_count": 3, "node_count": 3, "serialized_chars": 6900, "soft_pair_count": 3}, "cached_at": "2026-04-14T14:36:45Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "0570bc2e47b436e4bcb3756fcbb2068157cc76ffa78db6aa6a5dbd9df37d1b0c", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44354", "pull_request:44363", "pull_request:44584"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #44584 is the best representative because it was merged and directly resolves the concrete off-by-one error in the production code path.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #44584 is the merged, minimal fix for the exact boundary-condition bug in `decode_spans`, with the same affected file and issue target as the earlier duplicate PRs.", "confidence": 0.98, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:44354", "reason": "Same underlying bug in `document_question_answering.decode_spans`; identical boundary-condition fix (`<` to `<=`) and same issue target, so these are mergeable duplicates.", "right": "pull_request:44584"}, {"accept": true, 
"left": "pull_request:44354", "reason": "Both PRs fix the same off-by-one `topk == len(scores_flat)` failure in the same function and would be plausible to consolidate into one PR.", "right": "pull_request:44363"}, {"accept": true, "left": "pull_request:44363", "reason": "Same concrete code-path problem and same one-line fix in `decode_spans`; the later merged PR is just a cleaner duplicate resolution.", "right": "pull_request:44584"}], "summary": "All three PRs address the same off-by-one bug in `decode_spans` (`len(scores_flat) < topk` should be `<= topk`) and target the same issue. PR #44584 is the canonical/merged fix; #44354 and #44363 are near-duplicate earlier attempts with the same code-path change and regression coverage."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44354|pull_request:44584", "pull_request:44354|pull_request:44363", "pull_request:44363|pull_request:44584"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 1924, "estimated_input_tokens": 834, "item_count": 2, "node_count": 2, "serialized_chars": 3335, "soft_pair_count": 1}, "cached_at": "2026-04-14T14:37:04Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "4f895b6f09d12956944ff53e33ecc7df49b1a10f9c1f7b6a0840be26bc670b4a", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44372", "pull_request:44511"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #44372 is the strongest representative of the duplicate cluster because it contains the same fix with a clearer description and fuller rationale.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 
#44372 is the better canonical PR: it is slightly earlier, has more context in the body, and expresses the fix more explicitly while making the same code change as #44511.", "confidence": 0.99, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:44372", "reason": "Same function, same file, same issue target, and the diffs implement the same fallback for `inspect.getsource()` failures on compiled/Cython functions.", "right": "pull_request:44511"}], "summary": "Both pull requests implement the same narrow fix in `src/transformers/utils/doc.py`: fall back to indentation level 4 when `inspect.getsource()` fails for compiled/Cython/builtin functions. They target the same issue and are effectively duplicate PRs."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44372|pull_request:44511"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 3706, "estimated_input_tokens": 1725, "item_count": 3, "node_count": 3, "serialized_chars": 6899, "soft_pair_count": 3}, "cached_at": "2026-04-14T14:37:48Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "2f974c517f21648f786795b526fc6df705f9162b8a19cd7cdaaa5d6f75da6777", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44374", "pull_request:44547", "pull_request:44590"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #44547 is the strongest representative of the cluster because it addresses the concrete docstring correction in the exact target file and appears to be the maintained version of the fix.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #44547 is the best canonical PR: it 
targets the same file and same `position_ids` docstring problem as #44374, is the more recent/open iteration, and has more review activity, suggesting it supersedes the earlier closed PR.", "confidence": 0.94, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:44374", "reason": "Same underlying docstring correction in `src/transformers/modeling_flash_attention_utils.py`, same issue target, and the diffs are materially the same change with minor wording cleanup.", "right": "pull_request:44547"}, {"accept": false, "left": "pull_request:44374", "reason": "Only shares a broad issue target and a similar `position_ids` theme; this PR edits `masking_utils.py`, so it is a separate docstring change in a different code path.", "right": "pull_request:44590"}, {"accept": false, "left": "pull_request:44547", "reason": "Different file and different API surface. Both are docstring edits about `position_ids`, but they do not fix the same concrete change and should not be merged as duplicates.", "right": "pull_request:44590"}], "summary": "Two PRs are near-duplicates for the same docstring fix in `modeling_flash_attention_utils.py`; the third PR updates a similar `position_ids` docstring in a different file and should stay separate."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44374|pull_request:44547", "pull_request:44374|pull_request:44590", "pull_request:44547|pull_request:44590"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 3348, "estimated_input_tokens": 1546, "item_count": 3, "node_count": 3, "serialized_chars": 6183, "soft_pair_count": 2}, "cached_at": "2026-04-14T14:38:06Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": 
"gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "a5804023d797472e76782e575f15b9c395d9f46f3529d3b63156be604e90e79d", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44377", "pull_request:44498", "pull_request:44613"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "pull_request:44377 is the best single PR to represent the cluster\u2019s substantive work; it is a concrete feature implementation rather than a backend mapping tweak or documentation update.", "canonical_issue_reason": null, "canonical_pr_reason": "pull_request:44377 is the strongest representative of the cluster because it makes the main functional code change: adding TP support in the compressed-tensors quantizer. The other PRs are narrower and unrelated in implementation scope.", "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44377", "reason": "44377 changes quantization TP behavior in code; 44613 only adds/updates documentation for tensor parallelism. Same topic, different artifact type and no shared bug/change.", "right": "pull_request:44613"}, {"accept": false, "left": "pull_request:44498", "reason": "44498 adds Neuron backend support in TP initialization; 44613 is docs-only. They do not fix the same code-path problem and should not be merged as duplicates.", "right": "pull_request:44613"}], "summary": "The three PRs are not duplicates: one implements compressed-tensor TP support, one adds a Neuron backend mapping for TP init, and one is docs-only. 
They share the tensor-parallel theme but not the same underlying change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44377|pull_request:44613", "pull_request:44498|pull_request:44613"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 3174, "estimated_input_tokens": 1459, "item_count": 2, "node_count": 2, "serialized_chars": 5834, "soft_pair_count": 1}, "cached_at": "2026-04-14T14:38:25Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "8a2f25826020228ea20a83cd9a8c61b446917bdcb75371b88726a40f05b46a16", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44391", "pull_request:44392"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 44392 is the best overall representative since it is merged and targets a specific ASR cache behavior change, making it more clearly actionable and canonical than the broader 44391 PR.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 44392 is the better canonical representative because it is the more concrete, merged code-path fix in the ASR model, whereas 44391 is a broader integration/documentation change set.", "confidence": 0.89, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44391", "reason": "Do not merge: they share the VibeVoice ASR subsystem, but 44391 is a broad modular/model registration and docs PR, while 44392 specifically replaces the padding cache implementation in the ASR model. 
Different concrete changes, not one underlying bug/fix.", "right": "pull_request:44392"}], "summary": "These are related VibeVoice ASR pull requests in the same area, but they do not appear to be the same fix. PR 44391 is a broader ASR modular/cache integration change with docs and auto-registry updates, while PR 44392 is a narrower code fix for the ASR padding cache implementation."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44391|pull_request:44392"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 4796, "estimated_input_tokens": 2270, "item_count": 4, "node_count": 4, "serialized_chars": 9078, "soft_pair_count": 3}, "cached_at": "2026-04-14T14:40:19Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "0dcc8b94873068ca06fc7865e79bda249147b65bb8d1568c09c95bcf06bdebfc", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44399", "pull_request:45251", "pull_request:45368", "pull_request:45369"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "pull_request:45251 is the strongest PR in this cluster to represent the shared beam-search bug because it directly addresses the concrete code path in generation/utils.py and adds test coverage for the multinomial limit behavior.", "canonical_issue_reason": null, "canonical_pr_reason": "pull_request:45251 is the best canonical representative of the beam-search multinomial-limit fix: it targets the same underlying generation bug, has the explicit issue link, and includes a dedicated test file, making it the more complete and representative change.", "confidence": 0.97, "soft_edge_verdicts": 
[{"accept": false, "left": "pull_request:44399", "reason": "Different bugs and code paths: Qwen3_5 position_ids typo vs streaming serving tokenizer resolution. They do not appear to be the same underlying change.", "right": "pull_request:45368"}, {"accept": false, "left": "pull_request:45368", "reason": "Both touch different areas of the generation stack, but one fixes serving-time tokenizer access and the other fixes beam-search sampling under CUDA multinomial limits. Same broad library, not the same bug or change.", "right": "pull_request:45369"}, {"accept": true, "left": "pull_request:45251", "reason": "These are the same underlying bug fix: beam-search sampling in generation/utils.py can exceed torch.multinomial's CUDA dimension limit when num_beams * vocab_size is too large. Both target issue 45245 and patch the same code path with alternative implementations.", "right": "pull_request:45369"}], "summary": "This cluster contains two unrelated PRs and one tight duplicate pair around beam-search sampling hitting CUDA multinomial limits. The beam-search PRs are the only plausible duplicates; the other two PRs fix different subsystems and bugs."}, "error_kind": null, "error_message": null, "evaluator_result": {"accept": true, "feedback": "Grounded overall: 44399 is clearly a separate Qwen3_5 position_ids fix, 45368 is a separate serving-tokenizer fix, and 45251/45369 both point to the same beam-search multinomial-limit issue in generation/utils.py with shared issue linkage. 
The soft-edge verdicts stay conservative and do not overclaim duplication across unrelated subsystems."}, "evaluator_used": true, "retried": false}, "soft_pairs": ["pull_request:44399|pull_request:45368", "pull_request:45368|pull_request:45369", "pull_request:45251|pull_request:45369"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2218, "estimated_input_tokens": 981, "item_count": 2, "node_count": 2, "serialized_chars": 3921, "soft_pair_count": 1}, "cached_at": "2026-04-14T14:40:36Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "6632b12923e0d49ffe7b2f1eb29e76b9f080e95dabe084d7dcfc570c3ea1ef25", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44424", "pull_request:44436"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #44436 is the best cluster representative because it is the merged change and the clearest end-state fix for the reported continuous-batching behavior.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #44436 is the stronger representative: it is merged, directly addresses the multimodal limitation in continuous batching, and adds an explicit safeguard/fallback for unsupported model modalities.", "confidence": 0.79, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44424", "reason": "Related topic and same issue target, but these are different fixes: #44424 tries to make continuous batching work with multimodal inputs, while #44436 disables continuous batching for non-text-only models and falls back to regular generation. 
They are not the same concrete code-path change and would not naturally merge into one PR.", "right": "pull_request:44436"}], "summary": "Two PRs target the same reported continuous-batching/multimodal problem in `src/transformers/cli/serve.py`, but they implement different fixes: one adapts `apply_chat_template` output handling for multimodal processors, while the other explicitly falls back to regular generation for non-text-only models."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44424|pull_request:44436"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2478, "estimated_input_tokens": 1111, "item_count": 2, "node_count": 2, "serialized_chars": 4442, "soft_pair_count": 1}, "cached_at": "2026-04-14T14:40:58Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "120162dc862d2fc52b6265bc2e52f3fb227081bd951b19c0f7c6183a1a67da82", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44459", "pull_request:44845"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 44845 is the best representative of the underlying bug because it targets the explicit tracking issue and appears to cover the broader torch.compile masking-path problem rather than just one model-local workaround.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 44845 is the stronger canonical choice: it is explicitly tied to issue 44458, is the newer/current draft, and patches both the model code and shared masking utilities to address the compile failure more comprehensively.", "confidence": 0.79, "soft_edge_verdicts": [{"accept": false, "left": 
"pull_request:44459", "reason": "Both PRs mention the same issue and same general symptom, but they fix different concrete paths: 44459 changes Mllama cross-attention mask construction, while 44845 changes shared masking_utils padding handling plus model code. They are related fixes, not the same patch, and should not be merged as duplicates.", "right": "pull_request:44845"}], "summary": "Both PRs address Mllama torch.compile attention-mask failures, but they do so with different fixes and at different layers; they are related, not duplicates. PR 44845 looks like the more complete and current attempt."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44459|pull_request:44845"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2982, "estimated_input_tokens": 1363, "item_count": 2, "node_count": 2, "serialized_chars": 5452, "soft_pair_count": 1}, "cached_at": "2026-04-14T14:41:32Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "b8053b6db4076ba5e15089b52649da7201dc906e6e10a21395c1e08717f80804", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44467", "pull_request:44468"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "#44467 is the better representative because it spans the broader feature across conversion utilities, model tokenizers, and tests, whereas #44468 is a focused implementation detail in one helper.", "canonical_issue_reason": null, "canonical_pr_reason": "#44467 is the broader umbrella PR for placeholder-token support and touches the main conversion/tokenizer surface area; if one PR must represent the cluster, it better 
captures the overall change scope.", "confidence": 0.91, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44467", "reason": "Related topic, but not the same concrete code-path problem. #44467 is a wide placeholder-token update across many files; #44468 only replaces placeholder tokens from `added_tokens_decoder` in `tokenization_utils_tokenizers.py`. They could be part of the same project, but not safely merged as duplicates.", "right": "pull_request:44468"}], "summary": "The two PRs are related to placeholder-token handling, but they are not the same concrete change: #44467 is a broad multi-file placeholder-token update, while #44468 is a narrow fix in the tokenizers conversion path for replacing placeholder tokens from `added_tokens_decoder`."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44467|pull_request:44468"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2392, "estimated_input_tokens": 1068, "item_count": 2, "node_count": 2, "serialized_chars": 4271, "soft_pair_count": 1}, "cached_at": "2026-04-14T14:41:44Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "09bf86dabb1103bd404cb789f015b95b3e73eee7b6257cdd7b476be64b48f7a8", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44504", "pull_request:44505"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "#44505 is the strongest representative of the cluster: same file, same function, same user-facing bug, and the most complete implementation of the intended validation behavior.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #44505 is the better 
canonical PR because it covers the full path-validation improvement (`None`, `Path`, and missing-file handling) while #44504 is a narrower subset of the same fix.", "confidence": 0.96, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:44504", "reason": "Same underlying code-path problem in `load_vocab` with nearly identical intent and implementation; #44505 extends #44504\u2019s path validation rather than introducing a different change, so they could plausibly be merged into one PR.", "right": "pull_request:44505"}], "summary": "Both PRs target the same `load_vocab` error-handling fix in BERT tokenization and differ only in scope/detail; #44505 is a slightly broader version of the same change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44504|pull_request:44505"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6492, "estimated_input_tokens": 3118, "item_count": 3, "node_count": 3, "serialized_chars": 12471, "soft_pair_count": 2}, "cached_at": "2026-04-14T14:42:32Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "723ef76d5b8d0a4e76507fea0105133d21a95d91aab207bb67a486f67d0d2b66", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44510", "pull_request:44513", "pull_request:44533"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 44513 is the strongest cluster representative because it is merged, reviewed, explicitly linked to issue 44512, and matches the concrete doc cleanup that 44533 also attempts.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 44513 is the best canonical representative: it targets 
the `transformers run` CLI removal issue, has the same docs-only change as 44533, and is already merged with review activity.", "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44510", "reason": "Both are docs cleanup PRs, but they address different removals: 44510 drops pipeline task references (`text2text-generation`/`summarization`/`translation`), while 44513 removes obsolete `transformers run` CLI examples. Different underlying changes.", "right": "pull_request:44513"}, {"accept": true, "left": "pull_request:44513", "reason": "These appear to be the same underlying docs change: both target removal of the deprecated `transformers run` command references, touch the same model docs, and overlap heavily in diff scope.", "right": "pull_request:44533"}], "summary": "This cluster contains two distinct documentation cleanup PRs: one removing outdated pipeline-task references (44510) and one removing obsolete `transformers run` CLI examples (44513/44533). The latter two are duplicates of the same underlying doc change; 44510 is unrelated beyond being a docs sweep."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44510|pull_request:44513", "pull_request:44513|pull_request:44533"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 10386, "estimated_input_tokens": 5065, "item_count": 6, "node_count": 6, "serialized_chars": 20260, "soft_pair_count": 15}, "cached_at": "2026-04-14T14:43:13Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "61712b4d60746bb54abfac64850305e10a2bf688e91cce1c4494c12e2fa0b26f", "prepared_review_unit_schema_version": "1.0"}, "nodes": 
["pull_request:44516", "pull_request:44518", "pull_request:44531", "pull_request:44535", "pull_request:44692", "pull_request:44919"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "44535 is the best representative fix for the cluster because it is the most complete and directly framed around the actual failure mode, with explicit issue linkage and test coverage.", "canonical_issue_reason": null, "canonical_pr_reason": "44535 is the strongest canonical PR: it is explicitly scoped to the crash, has the clearest title, targets the underlying batched padding=False failure, and includes tests plus the relevant processor/modular changes.", "confidence": 0.91, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:44516", "reason": "Same underlying ragged-batch crash in Qwen2_5_VLProcessor; both patch the same mm_token_type_ids code path and differ only in implementation style.", "right": "pull_request:44518"}, {"accept": true, "left": "pull_request:44516", "reason": "Both address the same crash when processing unpadded batched input_ids, and both modify the mm_token_type_ids computation on the same processor path.", "right": "pull_request:44919"}, {"accept": true, "left": "pull_request:44516", "reason": "Same bug and same code path; the diffs are near-equivalent fixes for handling ragged input_ids sequence-by-sequence.", "right": "pull_request:44692"}, {"accept": true, "left": "pull_request:44516", "reason": "Both are fixes for the same ragged/unpadded batch crash in Qwen2_5_VLProcessor and could be merged into one PR.", "right": "pull_request:44535"}, {"accept": true, "left": "pull_request:44518", "reason": "Same concrete failure mode: batched input without padding causes mm_token_type_ids computation to break, and both fixes iterate per sequence.", "right": "pull_request:44919"}, {"accept": true, "left": "pull_request:44518", "reason": "These are clearly duplicate fixes for the same ragged input_ids crash in the Qwen2_5_VL processor 
path.", "right": "pull_request:44692"}, {"accept": true, "left": "pull_request:44518", "reason": "Same crash, same subsystem, and compatible changes to the processor path; they could plausibly be consolidated.", "right": "pull_request:44535"}, {"accept": true, "left": "pull_request:44518", "reason": "Same underlying bug and same affected code path; only minor implementation differences separate them.", "right": "pull_request:44531"}, {"accept": true, "left": "pull_request:44531", "reason": "Both fix the same unpadded batched input crash in the Qwen2_5_VL chat/template processing path.", "right": "pull_request:44692"}, {"accept": true, "left": "pull_request:44531", "reason": "Same underlying processor crash on ragged batch input_ids; the fixes are materially the same.", "right": "pull_request:44919"}, {"accept": true, "left": "pull_request:44531", "reason": "Same concrete bug and same family of code changes around mm_token_type_ids for ragged batches.", "right": "pull_request:44535"}, {"accept": true, "left": "pull_request:44535", "reason": "Both are fixes for the same crash in Qwen2_5_VLProcessor when padding=False leads to ragged batches.", "right": "pull_request:44692"}, {"accept": true, "left": "pull_request:44535", "reason": "Same underlying issue and overlapping processor logic; they are effectively duplicate fixes for the batched ragged-input crash.", "right": "pull_request:44919"}, {"accept": true, "left": "pull_request:44692", "reason": "Same failure mode and same mm_token_type_ids computation path, differing only in where the sequence-wise handling is applied.", "right": "pull_request:44919"}], "summary": "All items are closely related PRs fixing the same Qwen2_5_VL crash around ragged/unpadded batched input and mm_token_type_ids computation. 
The main differences are implementation style and whether the fix is applied in modular_qwen2_5_vl.py, processing_qwen2_5_vl.py, or both."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44516|pull_request:44518", "pull_request:44516|pull_request:44919", "pull_request:44516|pull_request:44692", "pull_request:44516|pull_request:44535", "pull_request:44516|pull_request:44531", "pull_request:44518|pull_request:44919", "pull_request:44518|pull_request:44692", "pull_request:44518|pull_request:44535", "pull_request:44518|pull_request:44531", "pull_request:44531|pull_request:44692", "pull_request:44531|pull_request:44919", "pull_request:44531|pull_request:44535", "pull_request:44535|pull_request:44692", "pull_request:44535|pull_request:44919", "pull_request:44692|pull_request:44919"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 1948, "estimated_input_tokens": 846, "item_count": 2, "node_count": 2, "serialized_chars": 3384, "soft_pair_count": 1}, "cached_at": "2026-04-14T14:43:31Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "41c622ad784f234bd1b00dc770c96d65ee5622195851ff8e32a7e8a16b717fb6", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44524", "pull_request:44525"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #44524 is the cleaner standalone bug fix: it addresses a concrete state-tracking bug in AssistantTracker with a minimal, targeted change.", "canonical_issue_reason": null, "canonical_pr_reason": "No clear canonical PR for a duplicate cluster: the PRs fix different functions and different failure modes, so neither is a 
duplicate of the other.", "confidence": 0.98, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44524", "reason": "Different code paths and different bugs: AssistantTracker.is_active() state handling versus _parse_type_hint union parsing. Shared filename alone is not enough to treat them as the same change.", "right": "pull_request:44525"}], "summary": "These are two unrelated pull requests that happen to touch the same file in chat template utilities: one fixes AssistantTracker activation state with empty lists, the other prevents a KeyError in type-hint parsing for unions containing Any. They should not be merged as duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44524|pull_request:44525"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2640, "estimated_input_tokens": 1192, "item_count": 3, "node_count": 3, "serialized_chars": 4765, "soft_pair_count": 3}, "cached_at": "2026-04-14T14:43:52Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "900944858f218ea48082d91ba4b5161e50ea7cb54f5387085f55ce31d779cd54", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44528", "pull_request:44529", "pull_request:44552"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 44529 is the best representative because it is merged and identical to 44528; 44552 is a separate revert change, not the primary fix/change.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 44529 is the merged instance of the exact same diff as 44528, so it is the best canonical PR for this cluster.", "confidence": 0.98, "soft_edge_verdicts": 
[{"accept": true, "left": "pull_request:44528", "reason": "Same file, same patch, same title; these are duplicate PRs for the exact same change, with 44529 being the merged copy.", "right": "pull_request:44529"}, {"accept": false, "left": "pull_request:44528", "reason": "44552 reverts the added line from 44528, so it is the inverse change rather than the same underlying fix/change.", "right": "pull_request:44552"}, {"accept": false, "left": "pull_request:44529", "reason": "44552 is a revert of the merged change in 44529, not a duplicate of the same PR content.", "right": "pull_request:44552"}], "summary": "44528 and 44529 are the same tiny code edit in the same file; 44529 is the merged duplicate. 44552 is a revert of that edit, so it is related but not the same change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44528|pull_request:44529", "pull_request:44528|pull_request:44552", "pull_request:44529|pull_request:44552"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 4158, "estimated_input_tokens": 1951, "item_count": 3, "node_count": 3, "serialized_chars": 7802, "soft_pair_count": 3}, "cached_at": "2026-04-14T14:45:28Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "6e08837802c7299cc7496e4e22bebd6425844741aca18eae7581b7574257a7eb", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44570", "pull_request:44618", "pull_request:45047"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 44570 is the strongest representative because it is the most focused, directly addresses the tokenizer bug, and has the cleanest scope compared 
with 44618 and the mixed-scope 45047.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 44570 is the best canonical PR: it is focused on the DeBERTa-v2 special-token regression, explicitly targets the linked issue, includes the relevant tokenizer and test changes, and was merged.", "confidence": 0.88, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:44570", "reason": "Both PRs fix the same DeBERTa-v2 tokenizer special-token regression in the same code path; 44618 is an alternate implementation of the same underlying fix.", "right": "pull_request:44618"}, {"accept": false, "left": "pull_request:44618", "reason": "45047 is a mixed-scope PR that bundles the DeBERTa tokenizer change with an unrelated modeling_utils buffer fix, so it is not the same single concrete change.", "right": "pull_request:45047"}, {"accept": false, "left": "pull_request:44570", "reason": "Although 45047 includes a tokenizer fix for the same issue, it also contains an unrelated bug fix, so it should not be treated as the same PR change.", "right": "pull_request:45047"}], "summary": "The cluster centers on a DeBERTa-v2 tokenizer special-token regression, but one PR (45047) is a mixed-scope patch that also fixes an unrelated modeling_utils bug. The two focused tokenizer PRs (44570 and 44618) are duplicates/alternates for the same underlying tokenizer behavior."}, "error_kind": null, "error_message": null, "evaluator_result": {"accept": true, "feedback": "The summary is grounded in the packet: 44570 and 44618 both target issue #44568 and modify the same DeBERTa-v2 tokenizer/test files, while 45047 is clearly a mixed-scope PR that also includes an unrelated modeling_utils fix. 
The soft-edge verdicts are conservative overall and the canonical PR choice is reasonable."}, "evaluator_used": true, "retried": false}, "soft_pairs": ["pull_request:44570|pull_request:44618", "pull_request:44618|pull_request:45047", "pull_request:44570|pull_request:45047"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 14024, "estimated_input_tokens": 6884, "item_count": 9, "node_count": 9, "serialized_chars": 27533, "soft_pair_count": 24}, "cached_at": "2026-04-14T14:48:38Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "57383346965f96ca62128a07765b1de66d9baf571fa6f295c6bbc39f2bab9228", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44630", "pull_request:44648", "pull_request:44651", "pull_request:44664", "pull_request:44688", "pull_request:44691", "pull_request:44693", "pull_request:44714", "pull_request:44920"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 44920 is the best representative of the underlying bug family because it directly addresses the Qwen3.5 label-propagation problem in the most targeted and comprehensive way.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 44920 is the most complete and focused Qwen3.5-specific fix: it propagates num_labels/id2label/label2id into text_config at construction time, matching the reported bug without the extra broad changes seen in some other PRs.", "confidence": 0.84, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:44648", "reason": "Same Qwen3.5 label-propagation bug; 44920 is a later, slightly more complete variant of the same fix.", "right": "pull_request:44920"}, {"accept": true, "left": "pull_request:44630", "reason": "Both fix the 
same underlying Qwen3.5 num_labels-to-text_config sync problem, just with different implementation approaches.", "right": "pull_request:44648"}, {"accept": true, "left": "pull_request:44630", "reason": "Same bug and same affected files; both are alternative implementations of propagating label settings into text_config.", "right": "pull_request:44920"}, {"accept": true, "left": "pull_request:44648", "reason": "44693 is a broader Qwen-label propagation patch that includes the same Qwen3.5 fix, so the underlying change overlaps.", "right": "pull_request:44693"}, {"accept": true, "left": "pull_request:44691", "reason": "Both target the Qwen3.5 text_config label propagation issue; 44693 is a broader superset of the same fix family.", "right": "pull_request:44693"}, {"accept": true, "left": "pull_request:44693", "reason": "Same Qwen3.5 label-propagation issue, with 44920 adding the same fix plus label-map propagation.", "right": "pull_request:44920"}, {"accept": true, "left": "pull_request:44714", "reason": "Both address the same Qwen-model label propagation problem in Qwen3.5-related config code, despite 44714 being broader.", "right": "pull_request:44920"}, {"accept": true, "left": "pull_request:44651", "reason": "Same underlying Qwen3.5 num_labels propagation bug; 44693 expands the same fix across more Qwen variants.", "right": "pull_request:44693"}, {"accept": true, "left": "pull_request:44688", "reason": "These are closely related broad Qwen label-propagation patches with overlapping Qwen3.5 config changes.", "right": "pull_request:44693"}, {"accept": false, "left": "pull_request:44648", "reason": "Different problems: 44648 fixes Qwen3.5 num_labels propagation, while 44664 is a broader multimodal sequence-classifier change.", "right": "pull_request:44664"}, {"accept": true, "left": "pull_request:44651", "reason": "Both include the same Qwen3.5 text_config label propagation fix, and 44688 is a broader superset across Qwen models.", "right": "pull_request:44688"}, 
{"accept": true, "left": "pull_request:44691", "reason": "Same fix family: Qwen label propagation into text_config, with 44714 broadening the scope to additional Qwen models.", "right": "pull_request:44714"}, {"accept": false, "left": "pull_request:44691", "reason": "44664 is about making generic sequence classification work for multimodal models, not the Qwen3.5 label-propagation bug.", "right": "pull_request:44664"}, {"accept": true, "left": "pull_request:44688", "reason": "Both are variants of the same Qwen label-propagation fix; 44688 just spans more models.", "right": "pull_request:44691"}, {"accept": true, "left": "pull_request:44688", "reason": "Overlapping Qwen3.5 config change for label propagation; 44920 is a focused refinement of the same bug fix.", "right": "pull_request:44920"}, {"accept": true, "left": "pull_request:44651", "reason": "Same underlying Qwen3.5 label-propagation issue, with 44714 being a broader Qwen-model patch.", "right": "pull_request:44714"}, {"accept": false, "left": "pull_request:44651", "reason": "Different code-path problem: 44651 fixes Qwen3.5 label syncing, while 44664 is a generic classifier/multimodal compatibility change.", "right": "pull_request:44664"}, {"accept": true, "left": "pull_request:44648", "reason": "Same Qwen3.5 num_labels propagation bug; 44691 extends the fix to related label-map handling.", "right": "pull_request:44691"}, {"accept": true, "left": "pull_request:44648", "reason": "Both are direct fixes for Qwen3.5 num_labels propagation to text_config.", "right": "pull_request:44651"}, {"accept": true, "left": "pull_request:44691", "reason": "Same Qwen3.5 label-related propagation bug, with 44920 being the later and more complete patch.", "right": "pull_request:44920"}, {"accept": true, "left": "pull_request:44630", "reason": "Both attempt to keep Qwen3.5 label state synchronized with text_config; implementation differs but bug is the same.", "right": "pull_request:44691"}, {"accept": true, "left": 
"pull_request:44630", "reason": "Same underlying Qwen3.5 propagation bug; 44630 uses a more invasive sync mechanism than 44651.", "right": "pull_request:44651"}, {"accept": true, "left": "pull_request:44688", "reason": "These are near-duplicate broad patches for Qwen label propagation across models, with heavy overlap in the same affected areas.", "right": "pull_request:44714"}], "summary": "This cluster is mostly a family of overlapping Qwen3.5/Qwen label-propagation fixes for sequence classification, with one adjacent but distinct multimodal classifier PR. The clearest canonical choice is the latest focused Qwen3.5 config fix that also propagates label maps into text_config."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44648|pull_request:44920", "pull_request:44630|pull_request:44648", "pull_request:44630|pull_request:44920", "pull_request:44648|pull_request:44693", "pull_request:44691|pull_request:44693", "pull_request:44693|pull_request:44920", "pull_request:44714|pull_request:44920", "pull_request:44651|pull_request:44693", "pull_request:44688|pull_request:44693", "pull_request:44648|pull_request:44664", "pull_request:44651|pull_request:44688", "pull_request:44691|pull_request:44714", "pull_request:44688|pull_request:44691", "pull_request:44688|pull_request:44920", "pull_request:44651|pull_request:44714", "pull_request:44651|pull_request:44664", "pull_request:44648|pull_request:44691", "pull_request:44648|pull_request:44651", "pull_request:44691|pull_request:44920", "pull_request:44651|pull_request:44691", "pull_request:44630|pull_request:44691", "pull_request:44630|pull_request:44651", "pull_request:44651|pull_request:44920", "pull_request:44688|pull_request:44714"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2738, "estimated_input_tokens": 1241, "item_count": 2, "node_count": 2, "serialized_chars": 
4961, "soft_pair_count": 1}, "cached_at": "2026-04-14T14:50:17Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "f3748cbcd9902ecf60e7ced4b1bab16cde6763c5e5fd14322fb36a019ed8feba", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44685", "pull_request:45228"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "#45228 is the strongest standalone PR in this cluster: it changes production code paths and configuration for tiny model creation across multiple models, whereas #44685 only patches test setup for a specific family.", "canonical_issue_reason": null, "canonical_pr_reason": "#45228 is the better representative of the cluster because it is the broader, more substantive code change; #44685 is a small, test-focused regression fix for a different problem.", "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44685", "reason": "Different underlying bugs/change sets. #44685 fixes missing `parent` passed into model tester constructors in PP-LCNet-related tests; #45228 updates tiny-model creation and image processor/config plumbing across several models. 
They do not fix the same concrete code path and should not be merged as duplicates.", "right": "pull_request:45228"}], "summary": "The two PRs are not duplicates: #44685 is a narrow test-only fix for missing `parent` wiring in PP-LCNet family model testers, while #45228 is a broader tiny-model-creation update spanning auto image processor registration, model configs, modular files, and tests."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44685|pull_request:45228"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 1866, "estimated_input_tokens": 805, "item_count": 2, "node_count": 2, "serialized_chars": 3218, "soft_pair_count": 1}, "cached_at": "2026-04-14T14:51:22Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "2c08fbd7706795bb44a89e54b0f219da3b18af9d9e00b596f1b5894f91d00f90", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44712", "pull_request:45243"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 45243 is the clearest exemplar of the cluster\u2019s shared pattern: a targeted update to the Nvidia CI Dockerfile for a newer torch/torchcodec pair.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 45243 is the later and more current Dockerfile version bump in the same CI path, so it is the better representative for this cluster.", "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44712", "reason": "Reject: these are consecutive dependency bumps in the same Dockerfile, not the same fix or change; one updates 2.9\u21922.10 and the other 2.10\u21922.11.", "right": 
"pull_request:45243"}], "summary": "Two small PRs both bump the Nvidia CI Dockerfile, but they are sequential version updates (2.9\u21922.10 and 2.10\u21922.11), not the same underlying change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44712|pull_request:45243"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7886, "estimated_input_tokens": 3815, "item_count": 6, "node_count": 6, "serialized_chars": 15259, "soft_pair_count": 7}, "cached_at": "2026-04-14T14:52:15Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "cc67ebb8c877dabd548957d0182d484af6fcc74f6b97e51234489f5eb58ae23b", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44718", "pull_request:44723", "pull_request:44856", "pull_request:44916", "pull_request:44986", "pull_request:45039"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "pull_request:44986 is the strongest single representative within one family because it is merged and fixes the same concrete Deberta-v2 Python 3.13 compatibility issue cleanly.", "canonical_issue_reason": null, "canonical_pr_reason": "No single canonical PR across the full cluster; it splits into two unrelated duplicate families (Pixio interpolate_pos_encoding propagation and Deberta-v2 Python 3.13 comment/decorator ordering).", "confidence": 0.9, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:44718", "reason": "Same Pixio bugfix: both propagate interpolate_pos_encoding through the embedding/model path, with the same issue target and overlapping files.", "right": "pull_request:44723"}, {"accept": false, "left": 
"pull_request:44718", "reason": "Different fixes in different subsystems: Pixio positional-encoding propagation vs Deberta-v2 Python 3.13 comment/decorator ordering.", "right": "pull_request:44986"}, {"accept": true, "left": "pull_request:44856", "reason": "Same Deberta-v2 Python 3.13 compatibility fix: moving 'Copied from' comments around @torch.jit.script in the same code path.", "right": "pull_request:44916"}, {"accept": true, "left": "pull_request:44856", "reason": "Same underlying Deberta-v2 comment/decorator ordering bug for Python 3.13, and the PRs touch the same helper definitions in the same file.", "right": "pull_request:44986"}, {"accept": true, "left": "pull_request:44916", "reason": "Same Deberta-v2 Python 3.13 compatibility change; both address the comment placement around @torch.jit.script for copied helpers.", "right": "pull_request:44986"}, {"accept": true, "left": "pull_request:44723", "reason": "Same Pixio change: propagate interpolate_pos_encoding through the model/embeddings, with matching target and overlapping files.", "right": "pull_request:45039"}, {"accept": true, "left": "pull_request:44718", "reason": "Same Pixio bugfix as the other Pixio PRs; this is the same concrete propagation change in the same files.", "right": "pull_request:45039"}], "summary": "The cluster actually contains two separate duplicate families: Pixio PRs about propagating interpolate_pos_encoding, and Deberta-v2 PRs about Python 3.13 @torch.jit.script/comment ordering. 
No single artifact is canonical across both themes."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44718|pull_request:44723", "pull_request:44718|pull_request:44986", "pull_request:44856|pull_request:44916", "pull_request:44856|pull_request:44986", "pull_request:44916|pull_request:44986", "pull_request:44723|pull_request:45039", "pull_request:44718|pull_request:45039"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 9278, "estimated_input_tokens": 4511, "item_count": 6, "node_count": 6, "serialized_chars": 18044, "soft_pair_count": 12}, "cached_at": "2026-04-14T14:53:16Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "45465768cda7a8907c1ae15f8177ffe1ae0ecc28c92ea0496674026ff9b854f1", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44745", "pull_request:44753", "pull_request:44762", "pull_request:44782", "pull_request:44822", "pull_request:44824"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 44782 is the best cluster representative because it is the only merged PR and most clearly captures the underlying XLNet bug fix without unrelated scope.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 44782 is the strongest representative: it is merged, directly fixes the XLNet CPU/device placement bug in `relative_positional_encoding`, and is a complete, focused implementation of the shared change.", "confidence": 0.91, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44745", "reason": "44745 is a broad mixed PR with many unrelated files and changes; it is not the same isolated XLNet device-placement fix as 
44824.", "right": "pull_request:44824"}, {"accept": false, "left": "pull_request:44745", "reason": "44745 includes unrelated flex-attention and other model/config changes, so it is not a clean duplicate of the focused XLNet fix in 44762.", "right": "pull_request:44762"}, {"accept": true, "left": "pull_request:44753", "reason": "Both PRs make the same XLNet `torch.arange(..., device=...)` fix in `relative_positional_encoding`; they are the same underlying bug/change.", "right": "pull_request:44822"}, {"accept": true, "left": "pull_request:44753", "reason": "Same XLNet device-placement bug in `relative_positional_encoding`, same file, same fix class, and both target the same issue.", "right": "pull_request:44782"}, {"accept": true, "left": "pull_request:44753", "reason": "Despite different wording in the title, the diff shows the same XLNet `relative_positional_encoding` device fix, so they are duplicates of the same change.", "right": "pull_request:44762"}, {"accept": true, "left": "pull_request:44753", "reason": "Both PRs fix the same XLNet `relative_positional_encoding` device-placement problem with the same code-path change.", "right": "pull_request:44824"}, {"accept": true, "left": "pull_request:44762", "reason": "Both PRs address the same XLNet `relative_positional_encoding` device-placement bug in the same code path.", "right": "pull_request:44824"}, {"accept": true, "left": "pull_request:44762", "reason": "Same underlying XLNet fix: move `torch.arange` allocations onto the model device in `relative_positional_encoding`.", "right": "pull_request:44782"}, {"accept": true, "left": "pull_request:44762", "reason": "Same XLNet bug and same device-placement correction in `relative_positional_encoding`.", "right": "pull_request:44822"}, {"accept": true, "left": "pull_request:44782", "reason": "Both PRs implement the same XLNet `relative_positional_encoding` device fix and could plausibly be merged into one patch.", "right": "pull_request:44822"}, {"accept": true, 
"left": "pull_request:44782", "reason": "Same underlying bug/change: ensure XLNet positional aranges are created on the model device.", "right": "pull_request:44824"}, {"accept": true, "left": "pull_request:44822", "reason": "Both PRs are the same XLNet device-placement fix for `relative_positional_encoding`.", "right": "pull_request:44824"}], "summary": "The cluster is centered on the same XLNet `relative_positional_encoding` device-placement bug, with one broader outlier PR touching many unrelated files. The cleanest representative is the merged XLNet fix PR."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44745|pull_request:44824", "pull_request:44745|pull_request:44762", "pull_request:44753|pull_request:44822", "pull_request:44753|pull_request:44782", "pull_request:44753|pull_request:44762", "pull_request:44753|pull_request:44824", "pull_request:44762|pull_request:44824", "pull_request:44762|pull_request:44782", "pull_request:44762|pull_request:44822", "pull_request:44782|pull_request:44822", "pull_request:44782|pull_request:44824", "pull_request:44822|pull_request:44824"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 1928, "estimated_input_tokens": 836, "item_count": 2, "node_count": 2, "serialized_chars": 3343, "soft_pair_count": 1}, "cached_at": "2026-04-14T14:53:38Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "08009e02203aa832f79ad3344ceb8e1548d7a5374acc409002b0f0a4b4f7db51", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44783", "pull_request:44819"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #44783 is the 
strongest standalone candidate because it directly addresses model-to-tokenizer resolution for DeepSeek v2/v3; #44819 is a narrower follow-up metadata adjustment, not the same change.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #44783 is the more direct fix: it adds the missing DeepSeek v2/v3 AutoTokenizer mappings to LlamaTokenizer, which matches the bug being reported more concretely than the hub-class list tweak.", "confidence": 0.91, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44783", "reason": "Same issue target and file, but different fixes: AutoTokenizer mapping vs hub tokenizer-class exception list. Related, but not the same underlying change or code-path bug.", "right": "pull_request:44819"}], "summary": "Both PRs target the same issue and same file, but they fix different tokenizer code paths: one adds AutoTokenizer model-to-class mappings, the other updates the hub tokenizer-class exception list. They are related, but not duplicates of the same concrete change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44783|pull_request:44819"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 5578, "estimated_input_tokens": 2661, "item_count": 4, "node_count": 4, "serialized_chars": 10644, "soft_pair_count": 6}, "cached_at": "2026-04-14T14:53:58Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "2e92a8251d4266dd6a68685dc4db74ff23f411b5e21659c0d5085633ebbba6f7", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44785", "pull_request:44786", "pull_request:44791", "pull_request:44806"], "result": {"analyst_result": 
{"best_issue_reason": null, "best_pr_reason": "PR 44791 is the best overall candidate because it is the most polished and self-contained version of the shared fix, with explicit Strix Halo detection and caching rather than a narrower or more ad hoc heuristic.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 44791 is the cleanest representative: it directly addresses the Strix Halo mmap/OOM problem, adds cached detection, and has the most complete/robust implementation among the candidates.", "confidence": 0.95, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:44785", "reason": "Same bug, same file, same issue target, and both disable mmap for Strix Halo in `core_model_loading.py`.", "right": "pull_request:44806"}, {"accept": true, "left": "pull_request:44785", "reason": "Both are alternative implementations of the same Strix Halo mmap/OOM fix in the same code path.", "right": "pull_request:44786"}, {"accept": true, "left": "pull_request:44785", "reason": "Same underlying change: detect Strix Halo and disable mmap to avoid OOM in model loading.", "right": "pull_request:44791"}, {"accept": true, "left": "pull_request:44786", "reason": "Same fix target and same affected code path; only the GPU-identification logic differs.", "right": "pull_request:44791"}, {"accept": true, "left": "pull_request:44786", "reason": "Both implement the same Strix Halo workaround in `core_model_loading.py` for the same issue.", "right": "pull_request:44806"}, {"accept": true, "left": "pull_request:44791", "reason": "Same underlying mmap workaround for Strix Halo OOMs, just expressed with slightly different detection logic.", "right": "pull_request:44806"}], "summary": "All four PRs target the same Strix Halo mmap/OOM bug in `src/transformers/core_model_loading.py` and all link issue 44756. 
They differ mainly in the GPU-detection heuristic and small implementation details, but the underlying fix is the same."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44785|pull_request:44806", "pull_request:44785|pull_request:44786", "pull_request:44785|pull_request:44791", "pull_request:44786|pull_request:44791", "pull_request:44786|pull_request:44806", "pull_request:44791|pull_request:44806"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2442, "estimated_input_tokens": 1093, "item_count": 2, "node_count": 2, "serialized_chars": 4372, "soft_pair_count": 1}, "cached_at": "2026-04-14T14:54:22Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "b00444956ad2ca5b3efc843dbc7a6205f0ed2062641dee4aabcacefc966679a3", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44793", "pull_request:44814"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 44814 is the best single PR to keep because it is smaller, focused, and directly addresses the key None-default crash path in Janus image generation. 
PR 44793 overlaps but adds additional, less clearly necessary logic around `generation_kwargs`.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 44814 is the narrower, cleaner fix: it directly guards None for `max_length` and `num_return_sequences` in `modeling_janus.py` without the extra `generation_kwargs`/`boi_token_id` changes, so it is a better representative of the core image-generation None-handling patch.", "confidence": 0.91, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44793", "reason": "Shared issue target and file are not enough here: 44793 patches `generation_kwargs`/`boi_token_id`, while 44814 patches `max_length`. They overlap on `num_return_sequences` but do not fix the same concrete bug, so treat as related but not duplicates.", "right": "pull_request:44814"}], "summary": "Both PRs target the same Janus image-generation None-handling issue and touch the same function, but they fix different missing-default paths rather than the same concrete change. 
They look more like overlapping follow-ups than true duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44793|pull_request:44814"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 9626, "estimated_input_tokens": 4685, "item_count": 7, "node_count": 7, "serialized_chars": 18739, "soft_pair_count": 9}, "cached_at": "2026-04-14T14:54:49Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "b5117bbba84de12cc07f5b0faa384bfbc24c8a6e0f6c94539523b7d4b4c77291", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44823", "pull_request:44826", "pull_request:44838", "pull_request:44842", "pull_request:44892", "pull_request:44946", "pull_request:45197"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "44892 is the best PR in the cluster because it addresses the concrete runtime failure for URL inputs in AutoImageProcessor, spans the needed helper utilities, and is backed by tests. The other code PRs are earlier or narrower variants of the same fix, while 44946 and 45197 are documentation-only and not suitable as the canonical code change.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 44892 is the best representative of the URL-loading fix: it directly patches the relevant image-processing code path, adds supporting utility plumbing, and includes tests. 
It is more complete than the earlier narrower variants (44823/44826/44838/44842), all of which target the same underlying bug but are less finished.", "confidence": 0.96, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:44823", "reason": "Same underlying AutoImageProcessor URL-loading bug; both patch image-processing loading behavior for the same issue and are plausibly mergeable into one fix.", "right": "pull_request:44838"}, {"accept": true, "left": "pull_request:44823", "reason": "Both are competing implementations for the same URL-input failure in AutoImageProcessor, targeting the same bug and same user-facing behavior.", "right": "pull_request:44826"}, {"accept": false, "left": "pull_request:44946", "reason": "These are unrelated documentation updates; they do not fix the same concrete code-path problem and should not be merged as duplicates.", "right": "pull_request:45197"}, {"accept": true, "left": "pull_request:44838", "reason": "Both fix the same runtime URL-loading failure for AutoImageProcessor; 44892 is a fuller version of the same underlying change.", "right": "pull_request:44892"}, {"accept": true, "left": "pull_request:44842", "reason": "Both address the same image-processor-from-URL regression and could plausibly be combined into a single implementation.", "right": "pull_request:44892"}, {"accept": false, "left": "pull_request:44892", "reason": "44892 is a code fix for URL loading, while 44946 is a broad docstring cleanup; same issue target alone is not enough to treat them as the same change.", "right": "pull_request:44946"}, {"accept": false, "left": "pull_request:44826", "reason": "44826 is a runtime fix for URL loading, whereas 44946 is documentation-only and edits many docstrings; they are not the same concrete fix.", "right": "pull_request:44946"}, {"accept": false, "left": "pull_request:44823", "reason": "44823 implements the URL-loading bug fix, but 44946 only changes docstrings about deprecated URL input, so they are not 
duplicates.", "right": "pull_request:44946"}, {"accept": true, "left": "pull_request:44826", "reason": "Same bug, same target behavior, and both are alternative implementations of AutoImageProcessor loading from a URL.", "right": "pull_request:44838"}], "summary": "This cluster is mostly multiple competing implementations for the same AutoImageProcessor URL-loading bug (issue 44821), plus a separate docstring/docs PR that is not the same change. PR 44892 is the strongest canonical fix because it contains the most complete code-path handling and tests. The doc-only PRs 44946 and 45197 should not be grouped with the runtime bug fixes."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44823|pull_request:44838", "pull_request:44823|pull_request:44826", "pull_request:44946|pull_request:45197", "pull_request:44838|pull_request:44892", "pull_request:44892|pull_request:44946", "pull_request:44826|pull_request:44946", "pull_request:44823|pull_request:44946", "pull_request:44842|pull_request:44892", "pull_request:44826|pull_request:44838"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 1874, "estimated_input_tokens": 809, "item_count": 2, "node_count": 2, "serialized_chars": 3233, "soft_pair_count": 1}, "cached_at": "2026-04-14T14:55:38Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "9c3313a8736ad46100e163d7c1766ff42ee59d1ca5fa5ebc07f6e2d34a0aab7f", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44909", "pull_request:44932"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #44909 is the best representative because it is merged, 
directly fixes the scheduler kwargs bug, and its diff matches #44932 almost exactly.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #44909 is the better canonical PR because it is the earlier merged implementation of the exact same code change against the same target issue.", "confidence": 0.99, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:44909", "reason": "Same issue target, same file, and the diff shows the same concrete fix to the inverse_sqrt scheduler path. These are duplicate PRs for the same underlying bug fix.", "right": "pull_request:44932"}], "summary": "Both pull requests implement the same fix in `src/transformers/optimization.py`: passing `scheduler_specific_kwargs` through the `INVERSE_SQRT` scheduler path. They target the same issue and have essentially identical diffs, so they are duplicates; the merged PR is the best canonical representative."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44909|pull_request:44932"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2284, "estimated_input_tokens": 1014, "item_count": 2, "node_count": 2, "serialized_chars": 4056, "soft_pair_count": 1}, "cached_at": "2026-04-14T14:56:26Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "370605423da1d9e65e18b74bc6d9d0c2118cc4819d3f00c9ef7228a53dbe263a", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44914", "pull_request:44915"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 44915 is the strongest standalone PR in this cluster because it addresses the underlying tokenizer cleanup behavior broadly and 
is more representative of the shared theme than the Llama-only conversion tweak.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 44915 is the better canonical representative because it changes the core tokenizer behavior at the framework level, whereas PR 44914 is a model-specific conversion workaround.", "confidence": 0.91, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44914", "reason": "Different underlying changes: 44914 only sets `clean_up_tokenization_spaces=False` for Llama 3 tokenizer conversion, while 44915 alters the default cleanup behavior for BPE tokenizers in `PreTrainedTokenizerFast`. They touch related code but are not the same bug/fix and would not plausibly be merged as one PR.", "right": "pull_request:44915"}], "summary": "These PRs are related by tokenizer cleanup behavior, but they address different scopes: one is a narrow Llama 3 conversion fix, the other changes the default cleanup path for BPE tokenizers in PreTrainedTokenizerFast. They should not be triaged as duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44914|pull_request:44915"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2432, "estimated_input_tokens": 1088, "item_count": 2, "node_count": 2, "serialized_chars": 4351, "soft_pair_count": 1}, "cached_at": "2026-04-14T14:56:41Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "f0efd257e3823a06f41949c2113399e4a5a5afc9d82d114563ee23209769810d", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44917", "pull_request:44985"], "result": {"analyst_result": {"best_issue_reason": null, 
"best_pr_reason": "PR #44985 is the best representative because it was merged and fixes the same underlying config serialization bug with the most complete scope.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #44985 is the merged, broader implementation of the same rotary_pct persistence fix and includes the GPT-NeoX file plus the Japanese config variant.", "confidence": 0.96, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:44917", "reason": "Same concrete bug in GPT-NeoX rope parameter handling; #44985 is essentially an expanded version of #44917 and could plausibly be merged as one fix.", "right": "pull_request:44985"}], "summary": "Both pull requests address the same GPT-NeoX rotary_pct save/load persistence bug in the config conversion path; PR #44985 is the broader merged fix and also covers the Japanese variant."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44917|pull_request:44985"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2406, "estimated_input_tokens": 1075, "item_count": 2, "node_count": 2, "serialized_chars": 4300, "soft_pair_count": 1}, "cached_at": "2026-04-14T14:57:17Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "c7498be1ac5e606c86323c6b9efcae1b0f7689d2f522ae2c897b61ac4172ef36", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44924", "pull_request:44940"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "No single best PR as a duplicate representative; if one must be chosen, PR #44924 is the more self-contained merged fix, but it is unrelated to PR #44940.", 
"canonical_issue_reason": null, "canonical_pr_reason": "No canonical PR: the two pull requests address different bugs in different files/code paths.", "confidence": 0.98, "soft_edge_verdicts": [{"accept": false, "left": "PR #44924: Continuous batching thread safety", "reason": "Different underlying bugs: one changes CUDA graph capture mode in continuous batching, the other scopes tie_weights skipping with ContextVar during model initialization. They are not the same change and would not merge into one PR.", "right": "PR #44940: Fix tie_weights skipping logic is not tied to model thread scope"}], "summary": "These PRs are not duplicates: one fixes CUDA graph capture thread-safety in continuous batching, while the other fixes tie_weights suppression scoping during concurrent model initialization. They touch different subsystems and different code paths."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44924|pull_request:44940"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2472, "estimated_input_tokens": 1108, "item_count": 2, "node_count": 2, "serialized_chars": 4432, "soft_pair_count": 1}, "cached_at": "2026-04-14T14:57:32Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "4843fa8c3ad0cdf530741220215044a078fb36c362cc9bc6dc23d1ad347ae167", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44925", "pull_request:44951"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 44951 is the best representative because it addresses the concrete code-path change rather than only describing usage.", "canonical_issue_reason": null, 
"canonical_pr_reason": "PR 44951 is the substantive implementation PR: it adds `router_logits` override support in model code and tests across MoE variants, directly enabling routing replay. PR 44925 is documentation-only.", "confidence": 0.92, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44925", "reason": "Related to the same feature request, but not the same artifact type or concrete change: 44925 only adds docs/recipe text, while 44951 implements the router override in code. They should not be merged as duplicates.", "right": "pull_request:44951"}], "summary": "These PRs are related to the same MoE routing-replay feature request, but they are not duplicates: one is documentation for a workaround/recipe, the other is the actual code change adding router override support across MoE models."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44925|pull_request:44951"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 5062, "estimated_input_tokens": 2403, "item_count": 4, "node_count": 4, "serialized_chars": 9611, "soft_pair_count": 4}, "cached_at": "2026-04-14T14:58:26Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "9e516b80a426b9f087ee25f6328b0c5544cd1215ffc2e1a95758c36625820d52", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44978", "pull_request:45015", "pull_request:45043", "pull_request:45089"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #45015 is the strongest single PR in the cluster because it most fully captures the fix with tests and an explicit issue link, while remaining the same concrete 
code-path change as the others.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #45015 is the best canonical representative: it has the clearest scope for both attn/expert implementation helpers, includes the related test file, and explicitly targets issue #45003.", "confidence": 0.98, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:44978", "reason": "Same underlying fix in the same code path: replace direct `sys.modules[...]` access with `get()` and guard missing modules before `__file__` access. The later PR is just a slightly expanded variant with tests.", "right": "pull_request:45015"}, {"accept": true, "left": "pull_request:45015", "reason": "These are the same bugfix on the same functions in `modeling_utils.py`; the diffs differ mainly in wording and breadth, not in the underlying change.", "right": "pull_request:45089"}, {"accept": true, "left": "pull_request:45015", "reason": "Both PRs fix the same concrete KeyError risk from `sys.modules[cls.__module__]` in `modeling_utils.py` and would plausibly be merged as one patch.", "right": "pull_request:45043"}, {"accept": true, "left": "pull_request:45043", "reason": "They implement the same `sys.modules.get()` guard for the same helper methods in the same file, so they are duplicate bugfix variants rather than separate changes.", "right": "pull_request:45089"}], "summary": "All four PRs are near-duplicate variants of the same bugfix in `src/transformers/modeling_utils.py`: guard `sys.modules` lookup with `get()` and handle missing module entries before checking `__file__`. Three PRs also reference the same issue target and the diffs are functionally overlapping."}, "error_kind": null, "error_message": null, "evaluator_result": {"accept": true, "feedback": "The summary is grounded in the packet: all four PRs touch the same `modeling_utils.py` code path and make the same `sys.modules.get()` / `None` guard change, with some variants adding tests and an issue link. 
The pairwise mergeability judgments are conservative enough and consistent with the diffs shown."}, "evaluator_used": true, "retried": false}, "soft_pairs": ["pull_request:44978|pull_request:45015", "pull_request:45015|pull_request:45089", "pull_request:45015|pull_request:45043", "pull_request:45043|pull_request:45089"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2460, "estimated_input_tokens": 1102, "item_count": 2, "node_count": 2, "serialized_chars": 4407, "soft_pair_count": 1}, "cached_at": "2026-04-14T14:58:49Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "b405a65c42b357156b86a958492afa1db70fe6ced2a3b273cd5aff7a5790f7c3", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44979", "pull_request:45363"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #44979 is the best representative for the cluster because it defines the core abstraction rather than one specific kernel-fusion application.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #44979 is the more foundational change: it introduces the reusable module-fusion API and tests, whereas #45363 is a separate kernel-config-based fusion implementation built on a different integration path.", "confidence": 0.93, "soft_edge_verdicts": [{"accept": false, "left": "PR #44979: Module Fusion API", "reason": "Related theme, but not the same underlying bug/change. 
#44979 adds a general fusion API; #45363 implements a specific kernel-fusion pathway via KernelConfig and integration code.", "right": "PR #45363: n-to-1 kernel fusion via `KernelConfig`"}], "summary": "These PRs are related in the broad area of module/kernel fusion, but they are not the same change: #44979 adds a new generic Module Fusion API, while #45363 wires n-to-1 kernel fusion through KernelConfig and integration hooks. Different scope and code paths, so the soft duplicate link should be rejected."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44979|pull_request:45363"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 3116, "estimated_input_tokens": 1430, "item_count": 2, "node_count": 2, "serialized_chars": 5717, "soft_pair_count": 1}, "cached_at": "2026-04-14T14:59:23Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "5b3542b99e7c199b26dc82ecfc9516c293e4db2526800417ad061a6850b159e3", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44996", "pull_request:45028"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "#45028 is the broader umbrella PR for FSDP+TP integration, so it is the better representative of this topic, but it is not a duplicate of #44996.", "canonical_issue_reason": null, "canonical_pr_reason": "No canonical duplicate PR: they overlap in distributed utilities and config serialization, but the surrounding diffs and titles show different scopes and code paths.", "confidence": 0.77, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44996", "reason": "Reject: shared distributed config/tensor-parallel 
plumbing is not enough; #44996 is a narrower merged refactor, while #45028 expands into broader TP/FSDP integration and model-specific changes, so they are not the same concrete fix/change.", "right": "pull_request:45028"}], "summary": "The two PRs are closely related distributed-training refactors, but they are not the same change: #44996 is a merged, narrower from_pretrained/FSDP2+TP refactor, while #45028 is a broader open TP/FSDP integration refactor that also touches model configs and additional integrations."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44996|pull_request:45028"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 5868, "estimated_input_tokens": 2806, "item_count": 3, "node_count": 3, "serialized_chars": 11224, "soft_pair_count": 3}, "cached_at": "2026-04-14T14:59:42Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "5f12e1e13fab2fdcef38d5ee25eff3965fec9b9735e33c51579ec78eeab3934d", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44997", "pull_request:44999", "pull_request:45133"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #45133 is the strongest single PR to keep as the cluster representative: it targets the same Sarvam model implementation as the others, is current/open, and looks like the most complete version of the shared change.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #45133 is the best canonical representative because it is the latest and only open PR, and its diff appears to be the most developed iteration of the same Sarvam model addition.", "confidence": 0.98, 
"soft_edge_verdicts": [{"accept": true, "left": "pull_request:44997", "reason": "Same title, same files, and same Sarvam model addition; this looks like an earlier/later revision of the same PR rather than a distinct change.", "right": "pull_request:44999"}, {"accept": true, "left": "pull_request:44997", "reason": "Same feature, same touched files, and overlapping implementation; the later PR is a plausible continuation of the earlier draft.", "right": "pull_request:45133"}, {"accept": true, "left": "pull_request:44999", "reason": "These are the same Sarvam model implementation PR with only revision differences, so they should be clustered together.", "right": "pull_request:45133"}], "summary": "All three pull requests are the same feature branch/iteration for adding the Sarvam model, touching the same files and code path. They differ only by revision size and status, so they should be clustered together as duplicates/near-duplicates rather than separate changes."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44997|pull_request:44999", "pull_request:44997|pull_request:45133", "pull_request:44999|pull_request:45133"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 3682, "estimated_input_tokens": 1713, "item_count": 2, "node_count": 2, "serialized_chars": 6851, "soft_pair_count": 1}, "cached_at": "2026-04-14T14:59:56Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "8a527c1050e86a92953b43e88dde3acb5640f6d6f8cee5884acc75fa1caa2c72", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:45010", "pull_request:45077"], "result": {"analyst_result": {"best_issue_reason": 
null, "best_pr_reason": "PR #45077 is the best standalone candidate: it is open, recent, and its diff is a coherent batch of workflow pinning/secret extraction changes.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #45077 is the better representative because it is the newer open workflow-hardening batch and appears more current than the closed #45010, while still covering the same general maintenance area.", "confidence": 0.89, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:45010", "reason": "Same broad CI/workflow hardening theme, but not the same underlying change. The PRs pin different actions across different workflow files and extract different env vars/secrets, so they would not plausibly be merged into one identical fix.", "right": "pull_request:45077"}], "summary": "Both pull requests are broad GitHub Actions hardening batches, but they are not the same change set: each pins a different collection of workflows/actions and extracts different unsafe expressions. 
They overlap in many filenames, yet the concrete fixes differ enough that they should not be merged as duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:45010|pull_request:45077"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 3216, "estimated_input_tokens": 1480, "item_count": 2, "node_count": 2, "serialized_chars": 5920, "soft_pair_count": 1}, "cached_at": "2026-04-14T15:00:38Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "6c4a68bf2f7795f6e94f8b7a61f521544a4e4facb1939712a6f04badc6c429fc", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:45045", "pull_request:45060"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "pull_request:45045 is the best representative because it is merged, directly targets the bug in the implementation layer, and appears to be the primary resolution for issue 45042.", "canonical_issue_reason": null, "canonical_pr_reason": "pull_request:45045 is the merged, more direct fix for the reported PIL backend torchvision dependency issue and touches the concrete image-processing code paths across affected models.", "confidence": 0.78, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:45045", "reason": "They share the same broad theme and issue target, but they modify different code paths: 45045 removes torchvision assumptions from PIL image processors, while 45060 changes import-utils dependency detection for PIL/video backends. 
Related fixes, not the same change.", "right": "pull_request:45060"}], "summary": "Both PRs are about reducing unwanted torchvision coupling in PIL-backed image processing, but they fix different layers of the problem: one edits backend/model code, the other adjusts import-time dependency inference and tests. They are related, not duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:45045|pull_request:45060"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2828, "estimated_input_tokens": 1286, "item_count": 2, "node_count": 2, "serialized_chars": 5142, "soft_pair_count": 1}, "cached_at": "2026-04-14T15:02:32Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "de3a70291dc5269be9bc770c8776d85a6df4b44895a31553571dbbc4b1698cba", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:45066", "pull_request:45076"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #45076 is the best single PR to point at for this cluster since it appears more inclusive than #45066, while still staying within the same general CLI-advisor area.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #45076 is the slightly broader representative of the cluster because it touches the shared advisor/compress files plus an extra hardware utility file, but the overlap is not strong enough to treat it as a confirmed superset.", "confidence": 0.58, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:45066", "reason": "The evidence shows high-level similarity and shared filenames, but not enough to establish the same underlying code change or a 
clean superset/subset relationship. For PRs, that is too weak to merge them as duplicates.", "right": "pull_request:45076"}], "summary": "These two closed PRs are clearly related in theme and overlap on files, but the packet does not prove they are the same mergeable PR change. They look like adjacent CLI feature work with shared filenames, not a confirmed duplicate pair."}, "error_kind": null, "error_message": null, "evaluator_result": {"accept": false, "feedback": "The linkage is too strong for the evidence provided. The packet only shows soft similarity and overlapping filenames; it does not establish that these are the same underlying PR work or a clean superset/subset relationship. The summary\u2019s claim of \u2018near-duplicate feature branches\u2019 and the soft-edge acceptance of the pair are therefore overconfident. These look related at a high level, but not clearly mergeable as one PR for the same bugfix based on the packet alone."}, "evaluator_used": true, "retried": true}, "soft_pairs": ["pull_request:45066|pull_request:45076"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2328, "estimated_input_tokens": 1036, "item_count": 2, "node_count": 2, "serialized_chars": 4144, "soft_pair_count": 1}, "cached_at": "2026-04-14T15:02:48Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "ed14fa81ab3f0bb91e9f168a6c48fca27f0aa5cfb842fd7a27dac948a6929bc5", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:45080", "pull_request:45129"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #45080 is the strongest candidate for the cluster because it addresses the bug in a more targeted way and is the more plausible standalone fix 
for the underlying schema-generation problem.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #45080 is the better canonical representative: it directly fixes Pydantic field handling with a class schema hook and preserves the original `dtype` annotation semantics, rather than broadening the type to `Any`.", "confidence": 0.93, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:45080", "reason": "Both PRs reference the same issue, but they are different fix strategies for the same symptom, not the same concrete code change. One adds a Pydantic schema hook; the other changes the `dtype` annotation to `Any`. They are alternatives, not mergeable duplicates.", "right": "pull_request:45129"}], "summary": "Two closed PRs target the same Pydantic-schema issue in `PreTrainedConfig`, but they implement different fixes: one adds a custom Pydantic core schema hook, the other relaxes `dtype` to `Any`. They are related alternatives, not the same change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:45080|pull_request:45129"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2464, "estimated_input_tokens": 1104, "item_count": 2, "node_count": 2, "serialized_chars": 4414, "soft_pair_count": 1}, "cached_at": "2026-04-14T15:03:13Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "1ac21ad2db6f2788689b7121f2b3f629403d0c89f0eddf73e9517e3b0cb1e55d", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:45086", "pull_request:45317"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 45317 is the best representative of this change 
because it is the more complete implementation of the same bug fix and is the one most ready to merge.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 45317 is the stronger canonical PR: it contains the same fix as 45086, adds tests, and has more review/discussion activity.", "confidence": 0.98, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:45086", "reason": "Same underlying bug and same code-path fix in the same file; 45317 is effectively an expanded version of 45086 with tests.", "right": "pull_request:45317"}], "summary": "Both pull requests address the same concrete bug in `_patch_mistral_regex`: an `AttributeError` from using `tokenizer.backend_tokenizer.pre_tokenizer` instead of `tokenizer.pre_tokenizer`. The second PR is the more complete version because it carries the same code fix plus tests."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:45086|pull_request:45317"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 3526, "estimated_input_tokens": 1635, "item_count": 3, "node_count": 3, "serialized_chars": 6539, "soft_pair_count": 3}, "cached_at": "2026-04-14T15:04:49Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "b7e305c142acf4e0b5c2c3a0c433f9bab694e6f4ccda45067b9b28011811b113", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:45087", "pull_request:45119", "pull_request:45240"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #45240 is the strongest candidate because it is merged, explicitly tied to the issue, and provides the most complete type-checker-facing fix for the affected code 
path.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #45240 is the merged, final implementation for #45071 and directly addresses the typing issue on `PreTrainedConfig` with a class-level `dataclass_transform` annotation.", "confidence": 0.94, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:45087", "reason": "Both PRs fix the same `wrap_init_to_accept_kwargs` type-checking problem in `configuration_utils.py` for the same tracked issue; they are alternative solutions to the same underlying bug.", "right": "pull_request:45119"}, {"accept": true, "left": "pull_request:45119", "reason": "Both target the same mypy/typing defect in `PreTrainedConfig` and modify the same code path; despite different implementations, they are clearly duplicate attempts at the same fix.", "right": "pull_request:45240"}, {"accept": true, "left": "pull_request:45087", "reason": "These are two versions of the same fix for the `PreTrainedConfig` typing issue in the same file, aimed at the same underlying bug and plausibly collapsible into one PR.", "right": "pull_request:45240"}], "summary": "All three PRs target the same mypy/type-checking problem around `PreTrainedConfig` in `configuration_utils.py`, with #45240 appearing to be the final merged fix."}, "error_kind": null, "error_message": null, "evaluator_result": {"accept": true, "feedback": "The summary is grounded in the packet: all three PRs explicitly target issue #45071, touch the same file, and are plausibly alternative fixes for the same mypy/type-checking problem. The note that #45240 is the merged final fix is supported. 
The soft-edge accepts are also conservative enough here given the shared issue target and overlapping code path."}, "evaluator_used": true, "retried": false}, "soft_pairs": ["pull_request:45087|pull_request:45119", "pull_request:45119|pull_request:45240", "pull_request:45087|pull_request:45240"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2676, "estimated_input_tokens": 1210, "item_count": 2, "node_count": 2, "serialized_chars": 4840, "soft_pair_count": 1}, "cached_at": "2026-04-14T15:05:44Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "8fc76226bb9718ae474933799a01c48b920034b8e4cb7cdcdf6793a993b91822", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:45088", "pull_request:45091"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #45091 best captures the underlying fix: it corrects the `_get_feat_extract_output_lengths` calculation in the Qwen3 Omni MoE audio path.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #45091 is the cleaner canonical representative because its title names the exact function being fixed, and its diff is identical to #45088.", "confidence": 0.98, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:45088", "reason": "Same exact code-path fix, same touched files, same diff, and same issue target; these are merge-equivalent duplicate PRs.", "right": "pull_request:45091"}], "summary": "Two closed pull requests are effectively duplicates: both patch the same Qwen3 Omni MoE audio feature-length formula in the same files with the same diff and the same issue target."}, "error_kind": null, "error_message": null, "evaluator_result": {"accept": true, "feedback": "The pair looks 
mergeable as a single bugfix PR: both PRs target the same issue (45083), touch the same Qwen3 Omni MoE files, and the visible diff previews show the same `_get_feat_extract_output_lengths` fix. The duplicate/merge-equivalent characterization is grounded. Minor caution: phrase the diffs as matching based on the provided previews rather than asserting absolute identity beyond the packet."}, "evaluator_used": true, "retried": false}, "soft_pairs": ["pull_request:45088|pull_request:45091"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2478, "estimated_input_tokens": 1111, "item_count": 2, "node_count": 2, "serialized_chars": 4442, "soft_pair_count": 1}, "cached_at": "2026-04-14T15:05:58Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "4bbd3dddd2d890fc679c2c415b24706f5e537247ad591c5f62cd40c20ad8deb5", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:45104", "pull_request:45105"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 45105 is the strongest representative: same bug, same file, more robust implementation, and tests; 45104 looks like an earlier partial version of the same fix.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 45105 is the best canonical PR because it addresses the same concrete crash path as 45104, resolves annotations once for the loop, and adds test coverage.", "confidence": 0.97, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:45104", "reason": "Same underlying crash in `src/transformers/utils/auto_docstring.py` from string annotations; 45105 is a fuller version of the same fix and could plausibly subsume 45104.", "right": "pull_request:45105"}], "summary": "Both PRs target 
the same auto_docstring crash caused by `from __future__ import annotations` in `_process_kwargs_parameters`; PR 45105 is the more complete fix and includes tests, while 45104 is a narrower earlier variant."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:45104|pull_request:45105"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 1838, "estimated_input_tokens": 791, "item_count": 2, "node_count": 2, "serialized_chars": 3161, "soft_pair_count": 1}, "cached_at": "2026-04-14T15:06:23Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "b6e3ac752735cff827684df4079477e346d24ff6cf5b2de38ab37fcb2a981d00", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:45107", "pull_request:45108"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "`pull_request:45107` is the most substantive and user-facing change: it fixes an actual pipeline crash caused by `None` values in generation config. `pull_request:45108` is narrower and only changes a config field type.", "canonical_issue_reason": null, "canonical_pr_reason": "No strong canonical PR for a duplicate cluster here; if forced, `pull_request:45107` is the better representative because it fixes a concrete runtime failure, while `pull_request:45108` is just a type annotation adjustment in a separate model config.", "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:45107", "reason": "Reject: these are not the same underlying bug or change. 
`45107` filters `None` values before updating a pipeline config to avoid a crash; `45108` changes the declared type of `Wav2Vec2Config.vocab_size` to allow `None`. They touch different files, different models, and could not plausibly be merged as one PR.", "right": "pull_request:45108"}], "summary": "Two merged PRs with only a broad thematic overlap around `None` handling, but they address different code paths and different kinds of changes: one fixes a runtime crash in the text-to-audio pipeline, the other loosens a Wav2Vec2 config type annotation."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:45107|pull_request:45108"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2676, "estimated_input_tokens": 1210, "item_count": 2, "node_count": 2, "serialized_chars": 4839, "soft_pair_count": 1}, "cached_at": "2026-04-14T15:06:40Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "58881f2a3ae13ce14c8c17cd40d7accefe69fb62b498e972c16bc52cd03f7930", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:45158", "pull_request:45159"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #45158 is the strongest cluster representative: it is the merged implementation of the Turkish docs addition, while #45159 is an overlapping, unmerged variant of the same change.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #45158 is the best canonical representative because it is merged, directly introduces the Turkish Get Started docs, and covers the same translation files/workflow area as the other PR.", "confidence": 0.94, "soft_edge_verdicts": 
[{"accept": true, "left": "pull_request:45158", "reason": "Both PRs add the Turkish Get Started documentation and touch the same doc files/workflow. They look like the same underlying translation change and could plausibly be combined into one PR.", "right": "pull_request:45159"}], "summary": "Both pull requests are near-duplicates for the same Turkish \u2018Get Started\u2019 documentation addition. The first PR is the merged version; the second adds the same docs content plus PR-doc workflow changes, so they represent one underlying change cluster."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:45158|pull_request:45159"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2354, "estimated_input_tokens": 1049, "item_count": 2, "node_count": 2, "serialized_chars": 4193, "soft_pair_count": 1}, "cached_at": "2026-04-14T15:06:57Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "d8b5ae3184ccf1ff90897b031608ecdcedbd1c49129f7da47cc83aacdbb3efcf", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:45266", "pull_request:45267"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "Neither PR is a strong canonical representative of a duplicate set; if one must be chosen, 45266 is slightly more self-contained, but the two changes are unrelated enough that no canonical PR is warranted.", "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:45266", "reason": "Different models and different intended changes (Albert vs DistilBERT); no shared filenames, targets, or 
concrete code-path bug. They are not the same underlying fix.", "right": "pull_request:45267"}], "summary": "These PRs are both small model-specific cleanup/docstring changes, but they touch different codepaths and different models, so they are not duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:45266|pull_request:45267"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 4246, "estimated_input_tokens": 1995, "item_count": 3, "node_count": 3, "serialized_chars": 7978, "soft_pair_count": 3}, "cached_at": "2026-04-14T15:08:38Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "7841b90fb7defa716718f3fe674524350069a584f27185a0f27608ff28f7d303", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:45309", "pull_request:45348", "pull_request:45383"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 45348 is the best cluster representative because it is merged and directly targets the underlying KeyError in apply_chat_template without the broader, noisier surface of 45309.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 45348 is the strongest canonical PR: it is merged, narrowly fixes the missing-content crash in apply_chat_template, and is the cleanest representative of the bug family.", "confidence": 0.82, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:45309", "reason": "Same underlying apply_chat_template missing-content crash; 45348 looks like a narrower, merged version of 45309's fix.", "right": "pull_request:45348"}, {"accept": true, "left": "pull_request:45309", "reason": "Both address the same 
missing-content crash path in apply_chat_template, with 45383 applying the same guard in a different branch of the same function.", "right": "pull_request:45383"}, {"accept": true, "left": "pull_request:45348", "reason": "Both are guarding apply_chat_template against absent message content and are plausibly mergeable into one fix for the same bug.", "right": "pull_request:45383"}], "summary": "All three PRs are variants of the same apply_chat_template missing-content KeyError fix; 45348 is the best representative because it is merged and directly addresses the crash with a focused patch."}, "error_kind": null, "error_message": null, "evaluator_result": {"accept": true, "feedback": "The summary is grounded in the packet: all three PRs target issue 45290, and their titles/diffs support a shared missing-content crash path around `apply_chat_template`. The pairwise mergeability judgments are plausible and conservative enough based on the shared files and issue target."}, "evaluator_used": true, "retried": false}, "soft_pairs": ["pull_request:45309|pull_request:45348", "pull_request:45309|pull_request:45383", "pull_request:45348|pull_request:45383"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2766, "estimated_input_tokens": 1255, "item_count": 2, "node_count": 2, "serialized_chars": 5019, "soft_pair_count": 1}, "cached_at": "2026-04-14T15:08:57Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "ee8ad34e504847e201d85ef5267ad7f8c4883eb52ba83aaecef702507ee400ab", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:45314", "pull_request:45361"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #45361 is the clearer standalone 
conversion enhancement: it adds explicit CLIP-like vision-model mappings for VLMs and includes targeted tests. PR #45314 is a separate fix for gemma3n/qwen3_5 text class loading.", "canonical_issue_reason": null, "canonical_pr_reason": "No single canonical duplicate PR here; the PRs solve different underlying conversion paths and should not be merged as duplicates.", "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:45314", "reason": "Different underlying changes: #45314 removes/adjusts text conversion mappings for gemma3n/qwen3_5 LLM loading from VLM checkpoints, while #45361 adds vision-model mappings for CLIP-like models in VLM conversion. They share a file but not the same bug or mergeable fix.", "right": "pull_request:45361"}], "summary": "These PRs overlap in `conversion_mapping.py` but address different conversion problems: one removes text-side mappings for VLM checkpoint loading, the other adds vision-model mappings for CLIP-like VLM conversion."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:45314|pull_request:45361"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2950, "estimated_input_tokens": 1347, "item_count": 2, "node_count": 2, "serialized_chars": 5385, "soft_pair_count": 1}, "cached_at": "2026-04-14T15:09:26Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "ea820b2fea406d03b74407f79feca45299e35bf2447722088abf7883af022ebb", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:45332", "pull_request:45333"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "pull_request:45332 is the 
broader, more central change: it extends heterogeneous support into cache/masking/modeling code paths, so it is the best representative PR for the cluster\u2019s overall feature work.", "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.93, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:45332", "reason": "Different scopes: 45333 is config-only per-layer configuration, while 45332 implements model/runtime support using that config. Shared heterogeneity plumbing is not enough to treat them as duplicates.", "right": "pull_request:45333"}], "summary": "Two related but distinct PRs: one adds heterogeneous per-layer config plumbing, the other adds broader heterogeneous model/runtime support. They overlap in the new feature area and shared helper files, but they are not the same concrete change set."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:45332|pull_request:45333"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2618, "estimated_input_tokens": 1181, "item_count": 2, "node_count": 2, "serialized_chars": 4722, "soft_pair_count": 1}, "cached_at": "2026-04-14T15:09:46Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "9dcd2da671c88f8f87ce0d9fd8bb624ce61608dc88450e63f9003bca7a13f0f4", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:45379", "pull_request:45380"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 45380 is the best representative fix because it directly links the issue and covers the shared deepstack_visual_indexes config plumbing more completely than 45379.", 
"canonical_issue_reason": null, "canonical_pr_reason": "PR 45380 is the better canonical PR: it explicitly targets issue #45375 and includes the same Qwen3_5_Moe config fix as 45379, plus the related Qwen3_5 vision config/modular updates.", "confidence": 0.8, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:45379", "reason": "Same underlying config bug: both add deepstack_visual_indexes to Qwen3_5_MoeVisionConfig, and 45380 can plausibly subsume 45379 as one merged change.", "right": "pull_request:45380"}], "summary": "Both PRs address the same deepstack_visual_indexes omission in Qwen3_5/Qwen3_5_Moe vision config handling; PR 45380 is the broader, more complete fix and subsumes the narrower 45379 change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:45379|pull_request:45380"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2664, "estimated_input_tokens": 1204, "item_count": 2, "node_count": 2, "serialized_chars": 4816, "soft_pair_count": 1}, "cached_at": "2026-04-14T15:10:02Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "e1b5599c612fdb48075cfe8ca3c27cb3f49963c73387de812a026c749d53dd67", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:45395", "pull_request:45414"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #45414 is the strongest representative: it was merged, targets the same issue, and appears to carry the same code/doc changes as #45395.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #45414 is the best canonical PR because it is the merged instance of the duplicate fix and matches the 
same title, issue target, and touched files as the other PR.", "confidence": 0.99, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:45395", "reason": "Same PR title, same explicit issue target, same shared filenames, and highly similar diff preview indicate the same underlying change; the merged PR #45414 is the canonical instance.", "right": "pull_request:45414"}], "summary": "Two pull requests appear to be duplicates of the same fix for the DeepSpeed ZeRO-3 rotary-kernel IndexError; one was merged and the other is an unmerged twin with the same title, target issue, and file set."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:45395|pull_request:45414"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7820, "estimated_input_tokens": 3782, "item_count": 18, "node_count": 18, "serialized_chars": 15126, "soft_pair_count": 16}, "cached_at": "2026-04-14T15:17:06Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "3dcf32dc5f2f4a63dd593908490dea26ddaba69fea4ce3e5d099d063da4982cb", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:30277", "issue:34634", "issue:35545", "issue:36010", "issue:41720", "issue:42915", "issue:43596", "issue:43638", "issue:43716", "issue:43828", "issue:44292", "issue:44509", "issue:44512", "issue:44661", "issue:44805", "issue:44918", "issue:45161", "issue:45237"], "result": {"analyst_result": {"best_issue_reason": "Issue 43596 is the best issue to anchor the cluster because it matches the accepted duplicate pair and captures the shared Deepspeed ZeRO3/BertModel index-out-of-bounds bug most directly.", "best_pr_reason": null, 
"canonical_issue_reason": "Issue 43596 is the cleanest representative of the only true duplicate subgroup: it reports the ZeRO3 + BertModel IndexError clearly and early, and 43638 describes the same failure mode with only wording changes.", "canonical_pr_reason": null, "confidence": 0.88, "soft_edge_verdicts": [{"accept": false, "left": "issue:43716", "reason": "Both mention Qwen/GPT-OSS-style failures, but one is an image preprocessor dtype mismatch and the other is an AMD GPU runtime issue; different bug shapes and likely different code paths.", "right": "issue:45237"}, {"accept": false, "left": "issue:43828", "reason": "Both mention dtype problems, but one is an autocast mismatch in Phi-tiny-MoE and the other is a GPT-OSS AMD GPU failure; too broad to be the same bug.", "right": "issue:45237"}, {"accept": false, "left": "issue:44292", "reason": "Different concrete failures: NVFP4 execution error versus an AMD GPU compatibility/runtime issue. Same model family is not enough to merge.", "right": "issue:45237"}, {"accept": false, "left": "issue:30277", "reason": "Jamba-v01 + ZeRO3 collective mismatch and ModernBERT ONNX export error are unrelated subsystems and failure modes.", "right": "issue:35545"}, {"accept": false, "left": "issue:35545", "reason": "ModernBERT ONNX export and Qwen3Moe FineGrainedFP8Config failures are different code paths and different kinds of errors.", "right": "issue:42915"}, {"accept": false, "left": "issue:35545", "reason": "ONNX export failure versus Qwen3 auto device mapping CUDA assert; same broad area of model usage, but not the same underlying bug.", "right": "issue:41720"}, {"accept": false, "left": "issue:44509", "reason": "Both are docs regressions in v5, but they concern different removed topics/commands and are separate documentation issues.", "right": "issue:44512"}, {"accept": false, "left": "issue:43638", "reason": "Both are IndexError-shaped reports, but one is a ZeRO3/BertModel size-0 failure and the other is a mask-shape 
mismatch; not the same concrete bug.", "right": "issue:44805"}, {"accept": false, "left": "issue:35545", "reason": "An ONNX export problem is unrelated to the GenerationMixin import error.", "right": "issue:36010"}, {"accept": false, "left": "issue:43596", "reason": "Different failures: ZeRO3/BertModel index-out-of-bounds versus an indexed tensor/mask shape mismatch. Too generic to unify.", "right": "issue:44805"}, {"accept": false, "left": "issue:44805", "reason": "A mask/tensor shape mismatch during indexing is not the same as 'Only TP not working with GPT-OSS MoE model'.", "right": "issue:45161"}, {"accept": false, "left": "issue:44805", "reason": "The SFT trainer embedding unpacking issue is a different integration bug from the mask/tensor shape mismatch.", "right": "issue:44918"}, {"accept": false, "left": "issue:44661", "reason": "add-new-model-like failing inside TOKENIZER_MAPPING_NAMES is a registry/CLI issue, not the same as the reported indexing mask mismatch.", "right": "issue:44805"}, {"accept": false, "left": "issue:30277", "reason": "Both involve distributed training and runtime errors, but one is a collective mismatch with Jamba-v01 while the other is a ZeRO3/BertModel index error; different underlying bugs.", "right": "issue:43638"}, {"accept": false, "left": "issue:34634", "reason": "BarkProcessor voice_preset and ModernBERT ONNX export are unrelated model/processor issues.", "right": "issue:35545"}, {"accept": true, "left": "issue:43596", "reason": "These describe the same Deepspeed ZeRO3 + BertModel index-out-of-bounds failure, with only minor wording/model-pretraining differences.", "right": "issue:43638"}], "summary": "This cluster is mostly heterogeneous: most items are unrelated model/runtime/docs issues that only share broad wording like dtype mismatch, index error, or deepspeed/transformers. 
The only clear duplicate pair is the ZeRO3 + BertModel IndexError reports (43596 and 43638)."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43716|issue:45237", "issue:43828|issue:45237", "issue:44292|issue:45237", "issue:30277|issue:35545", "issue:35545|issue:42915", "issue:35545|issue:41720", "issue:44509|issue:44512", "issue:43638|issue:44805", "issue:35545|issue:36010", "issue:43596|issue:44805", "issue:44805|issue:45161", "issue:44805|issue:44918", "issue:44661|issue:44805", "issue:30277|issue:43638", "issue:34634|issue:35545", "issue:43596|issue:43638"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7102, "estimated_input_tokens": 3423, "item_count": 17, "node_count": 17, "serialized_chars": 13689, "soft_pair_count": 12}, "cached_at": "2026-04-14T15:17:51Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "64b0e6ce139efb04f10500204d061627e5862313873c8e801f0fbe1dd8612ebd", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:24643", "issue:30277", "issue:34928", "issue:36331", "issue:39290", "issue:43039", "issue:43425", "issue:43596", "issue:43638", "issue:43828", "issue:43975", "issue:44292", "issue:44457", "issue:44805", "issue:45081", "issue:45161", "issue:45237"], "result": {"analyst_result": {"best_issue_reason": "issue:34928 is the best representative issue overall because it is specific, well-scoped, and has substantial discussion activity.", "best_pr_reason": null, "canonical_issue_reason": "issue:34928 is the strongest standalone report: it is detailed, highly discussed, and describes a concrete FSDP + activation checkpointing tensor-size mismatch with a clear 
failure mode.", "canonical_pr_reason": null, "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "issue:43975", "reason": "Different problems: one is a v5 detokenization bug for DeepSeek Coder, the other is LoRA merge/save/reload producing inconsistent outputs.", "right": "issue:44457"}, {"accept": false, "left": "issue:43039", "reason": "Both touch model-parallel/MoE workflows, but the concrete bugs differ: Liger kernel cross-entropy dispatch vs GPT-OSS tensor-parallel failure.", "right": "issue:45161"}, {"accept": false, "left": "issue:44292", "reason": "Unrelated failure modes: Qwen-3-8B-NVFP4 runtime error versus LoRA weight merge/reload output mismatch.", "right": "issue:44457"}, {"accept": false, "left": "issue:34928", "reason": "Both are shape/mismatch-style errors, but they occur in different code paths and describe different root causes (FSDP checkpointing vs mask indexing).", "right": "issue:44805"}, {"accept": false, "left": "issue:24643", "reason": "Both involve DeepSpeed, but one is a linear-layer weight dimensionality error and the other is a collective mismatch in ZeRO3; not the same bug.", "right": "issue:30277"}, {"accept": false, "left": "issue:43975", "reason": "Different scope: detokenization bug for DeepSeek Coder versus GPT-OSS-20B failing on AMD GPUs.", "right": "issue:45237"}, {"accept": false, "left": "issue:43638", "reason": "Different underlying issues: BertModel/ZeRO3 index-out-of-bounds during init versus GPT-OSS MoE tensor-parallel not working.", "right": "issue:45161"}, {"accept": false, "left": "issue:34928", "reason": "Completely different problems: activation-checkpointing/FSDP tensor-size mismatch versus a Trainer API signature break.", "right": "issue:36331"}, {"accept": false, "left": "issue:43596", "reason": "BertModel ZeRO3 init index error is not the same as GPT-OSS MoE tensor-parallel failure.", "right": "issue:45161"}, {"accept": false, "left": "issue:39290", "reason": "Both are tokenizer/model-adaptation 
issues, but they affect different models and different failing attributes, so they are not the same bug.", "right": "issue:45081"}, {"accept": false, "left": "issue:43828", "reason": "Both mention MoE/parallelism, but the concrete failures differ: autocast dtype mismatch versus tensor-parallel setup not working.", "right": "issue:45161"}, {"accept": false, "left": "issue:43425", "reason": "A broad Torch-version incompatibility is not the same as a specific Qwen NVFP4 runtime error.", "right": "issue:44292"}], "summary": "No duplicate cluster emerges here: the items span unrelated bugs across DeepSpeed/FSDP, tokenizer/config issues, model-specific runtime failures, and trainer API incompatibilities. None of the soft pairs look like the same underlying bug or fix."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43975|issue:44457", "issue:43039|issue:45161", "issue:44292|issue:44457", "issue:34928|issue:44805", "issue:24643|issue:30277", "issue:43975|issue:45237", "issue:43638|issue:45161", "issue:34928|issue:36331", "issue:43596|issue:45161", "issue:39290|issue:45081", "issue:43828|issue:45161", "issue:43425|issue:44292"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7778, "estimated_input_tokens": 3761, "item_count": 18, "node_count": 18, "serialized_chars": 15042, "soft_pair_count": 15}, "cached_at": "2026-04-14T15:19:17Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "c09c5b7cb99963346ced73f8cedea962f398c2bf06b79213c17b3b9ffb043107", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:30277", "issue:34928", "issue:36010", "issue:36331", "issue:41720", "issue:41762", 
"issue:43039", "issue:43541", "issue:43638", "issue:43716", "issue:43827", "issue:43828", "issue:44292", "issue:44512", "issue:44560", "issue:44805", "issue:44918", "issue:45161"], "result": {"analyst_result": {"best_issue_reason": "Issue 43638 is the best representative issue here because it captures the shared ZeRO-3 zero-sized-index failure more generally and is the strongest anchor for the only real duplicate set.", "best_pr_reason": null, "canonical_issue_reason": "Use issue 43638 as the canonical issue for the ZeRO-3 duplicate pair: it describes the generic \"index 0 is out of bounds\" failure under DeepSpeed ZeRO-3 and is broader than the Gemma3-specific loading report in 41762.", "canonical_pr_reason": null, "confidence": 0.74, "soft_edge_verdicts": [{"accept": false, "left": "issue:43541", "reason": "Both are runtime/dtype-style model errors, but they involve different models and different execution paths (torch dynamo grouped_mm tracing vs image preprocessor/model dtype mismatch).", "right": "issue:43716"}, {"accept": false, "left": "issue:44918", "reason": "Different problems: TRL SFT embedding unpacking vs a TP-only failure on GPT-OSS MoE. 
Same broad MoE/train-time area, but not the same bug.", "right": "issue:45161"}, {"accept": false, "left": "issue:34928", "reason": "Both are distributed/runtime failures, but the triggers differ materially: FSDP + activation checkpointing tensor recompute vs Qwen3 auto device mapping cudaErrorAssert.", "right": "issue:41720"}, {"accept": false, "left": "issue:43828", "reason": "Both mention model runtime issues, but one is an autocast dtype mismatch on Phi-tiny-MoE and the other is a Qwen-3 NVFP4 error; no clear shared code-path bug.", "right": "issue:44292"}, {"accept": false, "left": "issue:43716", "reason": "Different models and different failure modes; both may involve precision/quantization, but the underlying bugs are not clearly the same.", "right": "issue:44292"}, {"accept": false, "left": "issue:44292", "reason": "Qwen-3 NVFP4 runtime error vs Qwen3.5/TR L SFT embedding unpacking error; these do not look like one concrete defect.", "right": "issue:44918"}, {"accept": false, "left": "issue:43039", "reason": "Liger Kernel cross_entropy being called is a different issue from Qwen3.5 embedding unpacking in TRL SFT training.", "right": "issue:44918"}, {"accept": true, "left": "issue:41762", "reason": "Strong match: both report DeepSpeed ZeRO-3 with the exact same IndexError text ('index 0 is out of bounds for dimension 0 with size 0'), indicating the same underlying zero-sized-tensor bug.", "right": "issue:43638"}, {"accept": false, "left": "issue:43827", "reason": "Both are outdated-docs reports after v5 removals, but they concern different deprecated references (pipeline() vs run command), so they are not the same change.", "right": "issue:44512"}, {"accept": false, "left": "issue:43716", "reason": "Different model families and different symptoms: dtype mismatch in Mistral-3 image preprocessing vs StopIteration in Qwen3-vl video embedding loading.", "right": "issue:44560"}, {"accept": false, "left": "issue:43716", "reason": "A dtype mismatch in Mistral-3 is 
not the same bug as Qwen3.5 input embedding unpacking under TRL SFT.", "right": "issue:44918"}, {"accept": false, "left": "issue:43039", "reason": "Liger Kernel cross_entropy invocation and an autocast dtype mismatch are related only at a very high level; they are not the same concrete defect.", "right": "issue:43828"}, {"accept": false, "left": "issue:30277", "reason": "Completely different classes of failure: distributed collective mismatch with Jamba/ZeRO3 vs a trainer API signature error for compute_loss.", "right": "issue:36331"}, {"accept": false, "left": "issue:43039", "reason": "Cross-entropy/Liger Kernel behavior is unrelated to this mask-vs-indexed-tensor shape mismatch.", "right": "issue:44805"}, {"accept": false, "left": "issue:34928", "reason": "Activation checkpointing/FSDP recomputation mismatch and a GenerationMixin import error are unrelated bugs.", "right": "issue:36010"}], "summary": "This cluster is mostly heterogeneous: several unrelated model/runtime issues and a couple of stale-doc reports. 
The only credible duplicate pair is the DeepSpeed ZeRO-3 IndexError reports (41762/43638); the rest should stay separate."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43541|issue:43716", "issue:44918|issue:45161", "issue:34928|issue:41720", "issue:43828|issue:44292", "issue:43716|issue:44292", "issue:44292|issue:44918", "issue:43039|issue:44918", "issue:41762|issue:43638", "issue:43827|issue:44512", "issue:43716|issue:44560", "issue:43716|issue:44918", "issue:43039|issue:43828", "issue:30277|issue:36331", "issue:43039|issue:44805", "issue:34928|issue:36010"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7122, "estimated_input_tokens": 3433, "item_count": 18, "node_count": 18, "serialized_chars": 13732, "soft_pair_count": 11}, "cached_at": "2026-04-14T15:20:38Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "dbe7df7542004a45ffeb02c4e86a216335a47e496656c3f8cbdda62518d30740", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:39290", "issue:41093", "issue:41720", "issue:41762", "issue:43039", "issue:43404", "issue:43425", "issue:43531", "issue:43596", "issue:43638", "issue:43792", "issue:43854", "issue:43866", "issue:43901", "issue:44512", "issue:44560", "issue:44863", "issue:45070"], "result": {"analyst_result": {"best_issue_reason": "45070 is the least specific and most widely connected issue in the group, so it is the best anchor despite the lack of a true duplicate hub.", "best_pr_reason": null, "canonical_issue_reason": "If a single issue must represent the set, 45070 is the broadest and most system-level regression, with the most inbound references; the other issues 
are narrower, model-specific, or documentation-only.", "canonical_pr_reason": null, "confidence": 0.21, "soft_edge_verdicts": [{"accept": false, "left": "issue:41762", "reason": "Both are DeepSpeed ZeRO-3 index errors, but they involve different models and different failure contexts; not enough evidence for the same underlying bug.", "right": "issue:43596"}, {"accept": false, "left": "issue:43901", "reason": "Both are docs-related, but they cover different deprecated/changed text and different APIs; these are separate documentation issues.", "right": "issue:44512"}, {"accept": false, "left": "issue:39290", "reason": "Gemma3TextConfig missing a field under vLLM and Qwen3 auto device mapping causing a CUDA assert are different bugs and code paths.", "right": "issue:41720"}, {"accept": false, "left": "issue:43039", "reason": "Liger Kernel cross-entropy being called and Qwen3-vl-embedding video StopIteration are unrelated subsystems with different symptoms.", "right": "issue:44560"}, {"accept": false, "left": "issue:43866", "reason": "A corrupted Ovis2 checkpoint is not the same as NemotronH checkpoint loading support failing; different root causes.", "right": "issue:44863"}, {"accept": false, "left": "issue:43854", "reason": "GLM-4.7-Flash unit-test loading failure and Ovis2 checkpoint corruption are distinct model-specific issues.", "right": "issue:43866"}, {"accept": false, "left": "issue:43425", "reason": "Torch 2.10 incompatibility is a dependency/runtime issue, while 45070 is a pydantic model-field regression in PretrainedConfig; unrelated changes.", "right": "issue:45070"}, {"accept": false, "left": "issue:39290", "reason": "Both mention sliding_window, but one is a missing Gemma3 config attribute and the other is a Qwen3-MoE sliding-window behavior issue.", "right": "issue:43531"}, {"accept": false, "left": "issue:41093", "reason": "Both are index/shape errors around training/loading, but one is a mask-vs-tensor mismatch and the other is a zero-size index error 
under ZeRO-3 with Bert; not the same bug.", "right": "issue:43638"}, {"accept": false, "left": "issue:43792", "reason": "Whisper loading failure and a PretrainedConfig pydantic regression are unrelated.", "right": "issue:45070"}, {"accept": false, "left": "issue:43404", "reason": "Mistral3 weight-tying bug and Ovis2 checkpoint corruption are different underlying problems.", "right": "issue:43866"}], "summary": "This cluster is very heterogeneous: most items are distinct model-specific runtime/load errors or docs regressions, and the soft similarity links are not strong enough to treat as duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:41762|issue:43596", "issue:43901|issue:44512", "issue:39290|issue:41720", "issue:43039|issue:44560", "issue:43866|issue:44863", "issue:43854|issue:43866", "issue:43425|issue:45070", "issue:39290|issue:43531", "issue:41093|issue:43638", "issue:43792|issue:45070", "issue:43404|issue:43866"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7120, "estimated_input_tokens": 3432, "item_count": 18, "node_count": 18, "serialized_chars": 13725, "soft_pair_count": 11}, "cached_at": "2026-04-14T15:21:18Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "eee8051569a2e97f399d867e41155985b3c352f2c78fe41105f84a5f513dd75c", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:36010", "issue:39290", "issue:41093", "issue:41762", "issue:43296", "issue:43366", "issue:43531", "issue:43541", "issue:43572", "issue:43828", "issue:44291", "issue:44387", "issue:44589", "issue:44841", "issue:45005", "issue:45070", "issue:45084", "issue:45161"], "result": 
{"analyst_result": {"best_issue_reason": "issue:45070 is the broadest open v5 regression report and the closest thing to a cluster representative, but it should not be treated as a duplicate hub for the other issues.", "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.93, "soft_edge_verdicts": [{"accept": false, "left": "issue:43541", "reason": "Different models and failure paths: Mixtral grouped_mm tracing error vs Phi-tiny-MoE autocast dtype mismatch.", "right": "issue:43828"}, {"accept": false, "left": "issue:36010", "reason": "Unrelated errors: an import failure for GenerationMixin vs a Gemma3TextConfig missing attribute in vLLM.", "right": "issue:39290"}, {"accept": false, "left": "issue:44841", "reason": "Both are runtime failures, but one is a processor bug for Voxtral and the other is a compile-time template-node error.", "right": "issue:45084"}, {"accept": false, "left": "issue:41093", "reason": "Both are IndexErrors, but the specific tensor/mask mismatch and the DeepSpeed ZeRO-3 load-time out-of-bounds bug are different code paths.", "right": "issue:41762"}, {"accept": false, "left": "issue:43296", "reason": "One is a model-load failure in vLLM; the other is a feature request to add GGUF GPT-OSS support.", "right": "issue:43366"}, {"accept": false, "left": "issue:43572", "reason": "Both touch v5 config behavior, but one is a missing StableLmConfig field and the other is a broader pydantic PretrainedConfig regression.", "right": "issue:45070"}, {"accept": false, "left": "issue:44387", "reason": "Different problems: int4 quantization memory growth/OOM vs tied-weights handling for translation models.", "right": "issue:45005"}, {"accept": false, "left": "issue:43541", "reason": "Both mention MoE, but the traced Mixtral grouped_mm crash is a different bug from GPT-OSS tensor-parallel support.", "right": "issue:45161"}, {"accept": false, "left": "issue:44291", "reason": "Both are v5 initialization/config 
regressions, but the unexpected _is_hf_initialized argument and the pydantic PretrainedConfig field break are separate defects.", "right": "issue:45070"}, {"accept": false, "left": "issue:43531", "reason": "Qwen3-MoE sliding_window handling is a model-specific bug, not the same as the PretrainedConfig pydantic break.", "right": "issue:45070"}, {"accept": false, "left": "issue:43541", "reason": "Different symptoms and subsystems: torch dynamo grouped_mm tracing vs a missing Float8_e4m3fnStorage object.", "right": "issue:44589"}], "summary": "The cluster is heterogeneous: most items are distinct regressions, model-specific load failures, or feature requests that only loosely share Transformers/vLLM/v5 context. No soft pair looks like a safe duplicate merge."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43541|issue:43828", "issue:36010|issue:39290", "issue:44841|issue:45084", "issue:41093|issue:41762", "issue:43296|issue:43366", "issue:43572|issue:45070", "issue:44387|issue:45005", "issue:43541|issue:45161", "issue:44291|issue:45070", "issue:43531|issue:45070", "issue:43541|issue:44589"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6950, "estimated_input_tokens": 3347, "item_count": 18, "node_count": 18, "serialized_chars": 13388, "soft_pair_count": 10}, "cached_at": "2026-04-14T15:25:03Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "0f26fd50c5d8553e773d06db8c8401718d51be4865a722dca38ba4f19e9530c4", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:39186", "issue:39290", "issue:40990", "issue:41093", "issue:42915", "issue:43054", "issue:43404", "issue:43541", 
"issue:43596", "issue:43645", "issue:43742", "issue:43994", "issue:44530", "issue:44863", "issue:44918", "issue:44960", "issue:45005", "issue:45399"], "result": {"analyst_result": {"best_issue_reason": "issue:43994 is the strongest representative of the cluster because it captures the core symptom as an end-to-end model/output failure, not just a comparative quality complaint.", "best_pr_reason": null, "canonical_issue_reason": "issue:43994 is the better canonical because it states the broader user-visible failure mode for siglip2 with AutoModel/pipeline and is more directly actionable.", "canonical_pr_reason": null, "confidence": 0.74, "soft_edge_verdicts": [{"accept": false, "left": "issue:39290", "reason": "Both are loading/config errors, but one is a Gemma3 config attribute regression with vLLM and the other is a MobileLLM key error; too different to be the same bug.", "right": "issue:43742"}, {"accept": true, "left": "issue:43054", "reason": "Both concern siglip2 producing bad/incorrect embeddings or outputs, and both read like the same underlying model handling regression affecting inference quality.", "right": "issue:43994"}, {"accept": false, "left": "issue:41093", "reason": "Both are tensor shape/indexing failures, but they occur in different contexts (mask mismatch vs deepspeed zero3 init) and do not point to the same code path.", "right": "issue:43596"}, {"accept": false, "left": "issue:42915", "reason": "Different models and different failure modes: FineGrainedFP8Config on Qwen3Moe versus grouped_mm torch-dynamo tracing on Mixtral.", "right": "issue:43541"}, {"accept": false, "left": "issue:40990", "reason": "One is a perplexity/benchmark complaint on gpt-oss-20b, the other is a GLM5 issue; no clear shared bug or change.", "right": "issue:44960"}, {"accept": false, "left": "issue:39186", "reason": "FSDP 'weight must be 2-D' and TRL SFT embedding unpacking are separate embedding-related failures, but not the same concrete bug.", "right": "issue:44918"}, 
{"accept": false, "left": "issue:43404", "reason": "Both mention tied weights, but one is a specific Mistral3 lm_head tying bug and the other is a broad v5 translation-model report; too general to merge.", "right": "issue:45005"}, {"accept": false, "left": "issue:43645", "reason": "Jupyter/custom-model initialization breakage and flash-attn2 fallback gating are unrelated.", "right": "issue:45399"}, {"accept": false, "left": "issue:44530", "reason": "PagedAttentionCache linear_attention crash on Qwen3.5 is a different subsystem and failure mode from tied-weights issues.", "right": "issue:45005"}, {"accept": false, "left": "issue:44863", "reason": "NemotronH checkpoint loading is unrelated to the v5 tied-weights translation-model issue.", "right": "issue:45005"}], "summary": "Most items are unrelated model-specific bugs. The only likely duplicate pair is the two Siglip2 reports, both describing incorrect or nonsensical outputs from siglip2; the rest are too far apart to merge confidently."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:39290|issue:43742", "issue:43054|issue:43994", "issue:41093|issue:43596", "issue:42915|issue:43541", "issue:40990|issue:44960", "issue:39186|issue:44918", "issue:43404|issue:45005", "issue:43645|issue:45399", "issue:44530|issue:45005", "issue:44863|issue:45005"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6878, "estimated_input_tokens": 3311, "item_count": 18, "node_count": 18, "serialized_chars": 13243, "soft_pair_count": 10}, "cached_at": "2026-04-14T15:25:53Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": 
"251cfbf827cee77bc25ad018a128d59652e6005f87ecebfda42c383edfab6759", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:39692", "issue:41553", "issue:42915", "issue:43054", "issue:43644", "issue:43828", "issue:43873", "issue:43883", "issue:43950", "issue:43975", "issue:44292", "issue:44360", "issue:44492", "issue:44512", "issue:44534", "issue:44928", "issue:45005", "issue:45399"], "result": {"analyst_result": {"best_issue_reason": "43950 is the best cluster anchor because it names the concrete failure mode and regression behavior directly, making it the strongest canonical issue for the shared bug.", "best_pr_reason": null, "canonical_issue_reason": "Issue 43950 is the most precise representative of the duplicate trio about `from_pretrained()` corrupting `register_buffer(persistent=False)` state in v5, with a clearer root-cause framing than the broader variants.", "canonical_pr_reason": null, "confidence": 0.92, "soft_edge_verdicts": [{"accept": false, "left": "issue:43873", "reason": "Different problems: quantization/offloading behavior versus tied-weight handling in translation models.", "right": "issue:45005"}, {"accept": false, "left": "issue:44360", "reason": "Unrelated: a DSA indexer bug versus a documentation typo about a removed command.", "right": "issue:44512"}, {"accept": false, "left": "issue:42915", "reason": "Different code paths: FineGrainedFP8Config failure for Qwen3Moe versus flash-attn2 fallback gating.", "right": "issue:45399"}, {"accept": false, "left": "issue:43975", "reason": "Different model-specific failures with different symptoms; not the same underlying bug.", "right": "issue:44292"}, {"accept": false, "left": "issue:44492", "reason": "A docs typo and a removed-command documentation issue are not the same change or bug.", "right": "issue:44512"}, {"accept": true, "left": "issue:43950", "reason": "Same underlying regression: v5 mishandles non-persistent buffers during `from_pretrained()`, described with different wording.", 
"right": "issue:44534"}, {"accept": true, "left": "issue:43644", "reason": "Same bug family: both report v5 corrupting/filling non-persistent buffers with junk during model loading.", "right": "issue:43950"}, {"accept": false, "left": "issue:43828", "reason": "Different failure modes and models; one is a dtype mismatch under autocast, the other is RLHF NaN/gradient explosion from a separate attention/position_ids issue.", "right": "issue:44928"}, {"accept": false, "left": "issue:39692", "reason": "Both mention SigLIP2, but one is docs/example errors and the other is embedding quality regression; not the same bug.", "right": "issue:43054"}, {"accept": false, "left": "issue:41553", "reason": "Unrelated: an AutoTokenizer error message issue versus a missing attribute on a model class.", "right": "issue:43883"}], "summary": "Most items are unrelated. The only clear duplicate cluster is the non-persistent-buffer corruption regression in Transformers v5, spanning three issues; the rest should stay separate."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43873|issue:45005", "issue:44360|issue:44512", "issue:42915|issue:45399", "issue:43975|issue:44292", "issue:44492|issue:44512", "issue:43950|issue:44534", "issue:43644|issue:43950", "issue:43828|issue:44928", "issue:39692|issue:43054", "issue:41553|issue:43883"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7066, "estimated_input_tokens": 3405, "item_count": 18, "node_count": 18, "serialized_chars": 13620, "soft_pair_count": 11}, "cached_at": "2026-04-14T15:26:55Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": 
"f8cf85ca75e6a1fa27a48baee643018cc84bb7be70d4941e511cad8633ebdb88", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:36331", "issue:38175", "issue:41553", "issue:41762", "issue:42947", "issue:43054", "issue:43493", "issue:43643", "issue:43856", "issue:43866", "issue:43872", "issue:43881", "issue:43883", "issue:44387", "issue:44403", "issue:44451", "issue:44589", "issue:45020"], "result": {"analyst_result": {"best_issue_reason": "43493 is the strongest issue to anchor the cluster around: it is the clearest statement of the shared SigLIP2 bug theme, while most other issues are unrelated.", "best_pr_reason": null, "canonical_issue_reason": "43493 is the best representative of the only coherent subcluster because it names the underlying SigLIP2 HF-vs-JAX implementation discrepancy that also explains 43054's degraded embeddings.", "canonical_pr_reason": null, "confidence": 0.61, "soft_edge_verdicts": [{"accept": false, "left": "issue:43866", "reason": "Checkpoint corruption and loading noise are different problems with no shared concrete bug path.", "right": "issue:44403"}, {"accept": false, "left": "issue:36331", "reason": "A custom loss-signature TypeError and a ZeRO-3 loading IndexError are unrelated failures.", "right": "issue:41762"}, {"accept": false, "left": "issue:43872", "reason": "Both are backend-compatibility/loading errors, but they involve different APIs and error modes.", "right": "issue:44589"}, {"accept": false, "left": "issue:41553", "reason": "These are different model-loading failures with no evidence of the same root cause.", "right": "issue:43881"}, {"accept": false, "left": "issue:38175", "reason": "Both involve SigLIP2, but zero probabilities and worse embeddings are only loosely related symptoms; not enough to call them the same bug.", "right": "issue:43054"}, {"accept": false, "left": "issue:42947", "reason": "Gradient checkpointing ineffectiveness and int4 quantization reserved-memory growth are separate memory regressions.", 
"right": "issue:44387"}, {"accept": false, "left": "issue:43881", "reason": "Different models, different errors, and no shared code-path evidence.", "right": "issue:43883"}, {"accept": true, "left": "issue:43054", "reason": "Both describe the same SigLIP2 implementation discrepancy/degradation, and one fix could plausibly address both reports.", "right": "issue:43493"}, {"accept": false, "left": "issue:44403", "reason": "Loading log noise is not the same as an actual inability to load ScandiBERT.", "right": "issue:44451"}, {"accept": false, "left": "issue:42947", "reason": "These are separate training-memory complaints with different model families and mechanisms.", "right": "issue:43856"}, {"accept": false, "left": "issue:43643", "reason": "The AutoConfig missing-fields bug is a specific remote_code symptom, while 45020 is a broader general report and not clearly the same concrete defect.", "right": "issue:45020"}], "summary": "This cluster is mostly heterogeneous. The only clear duplicate-like pair is the SigLIP2 regression subset (43054/43493); the rest are distinct loading, compatibility, or training issues that do not look mergeable into one change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43866|issue:44403", "issue:36331|issue:41762", "issue:43872|issue:44589", "issue:41553|issue:43881", "issue:38175|issue:43054", "issue:42947|issue:44387", "issue:43881|issue:43883", "issue:43054|issue:43493", "issue:44403|issue:44451", "issue:42947|issue:43856", "issue:43643|issue:45020"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6694, "estimated_input_tokens": 3219, "item_count": 17, "node_count": 17, "serialized_chars": 12876, "soft_pair_count": 10}, "cached_at": "2026-04-14T15:27:29Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", 
"hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "0a072d85330a2ba8c4b08e41299f874abca0fece10733cdac5edf2c5f0ea016d", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:41762", "issue:42491", "issue:42915", "issue:43054", "issue:43278", "issue:43782", "issue:43824", "issue:43872", "issue:43975", "issue:44368", "issue:44403", "issue:44451", "issue:44488", "issue:44661", "issue:44960", "issue:45020", "issue:45356"], "result": {"analyst_result": {"best_issue_reason": "No true canonical issue stands out because the reports are heterogeneous. If forced to pick one representative, `issue:45020` is the broadest user-facing regression around recent-version loading/remote_code behavior, but it does not subsume the rest.", "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "issue:44403", "reason": "Both concern loading-related behavior, but one is a generic warning/noise issue and the other is a specific model load failure for `cjvt/sleng-bert`; different symptoms, models, and likely root causes.", "right": "issue:44488"}, {"accept": false, "left": "issue:43782", "reason": "`43782` is a Qwen3VL `from_pretrained` weight-only error, while `44960` is a separate GLM5 issue. Same broad model-loading area, but no evidence of the same code path or bug.", "right": "issue:44960"}, {"accept": false, "left": "issue:42491", "reason": "`42491` is about LoRA compatibility across hf4.x/hf5.x for qwen3_moe; `44368` is a warning about `tie_word_embeddings=False` during Qwen3.5-27B fine-tuning. Different models and different failure modes.", "right": "issue:44368"}, {"accept": false, "left": "issue:41762", "reason": "`41762` is a Gemma3 + DeepSpeed ZeRO-3 loading crash; `42915` is a Qwen3Moe FP8 config failure. 
Both are loading failures, but the affected model/config stack is unrelated.", "right": "issue:42915"}, {"accept": false, "left": "issue:43975", "reason": "`43975` is a DeepSeek Coder detokenization regression, while `44451` is a model loading failure for `vesteinn/ScandiBERT`. Different subsystems and no shared concrete bug.", "right": "issue:44451"}, {"accept": false, "left": "issue:45020", "reason": "`45020` is a broad `remote_code` compatibility regression, but `45356` is a specific Kimi-K2.5 tokenizer/codec regression between versions 5.3.0 and 5.4.0. Related release context, not the same bug.", "right": "issue:45356"}, {"accept": false, "left": "issue:44661", "reason": "`44661` is a tokenizer-mapping edge case in `add-new-model-like`; `45020` is a broader remote-code breakage report. They touch model loading, but the concrete failure paths differ.", "right": "issue:45020"}, {"accept": false, "left": "issue:43824", "reason": "`43824` is an import/export symbol problem for `Qwen2_5_VLForConditionalGeneration`; `43872` is a bitsandbytes `Int8Params` constructor incompatibility. Separate packages and separate failure mechanisms.", "right": "issue:43872"}, {"accept": false, "left": "issue:43975", "reason": "A detokenization bug in DeepSeek Coder is unrelated to a model load failure for `cjvt/sleng-bert`; there is no shared underlying change.", "right": "issue:44488"}, {"accept": false, "left": "issue:43054", "reason": "`43054` compares SigLIP2 text embeddings to SigLIP quality, while `43278` reports a dtype mismatch between training and evaluation. The latter could affect numerics, but the reports are not the same bug.", "right": "issue:43278"}], "summary": "This cluster is not a duplicate set: it mixes unrelated loading, tokenizer, dtype, quantization, and version-regression reports across different models and subsystems. 
None of the soft-similarity pairs look like the same underlying bug or change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44403|issue:44488", "issue:43782|issue:44960", "issue:42491|issue:44368", "issue:41762|issue:42915", "issue:43975|issue:44451", "issue:45020|issue:45356", "issue:44661|issue:45020", "issue:43824|issue:43872", "issue:43975|issue:44488", "issue:43054|issue:43278"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6882, "estimated_input_tokens": 3313, "item_count": 17, "node_count": 17, "serialized_chars": 13251, "soft_pair_count": 11}, "cached_at": "2026-04-14T15:28:38Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "f3c223b907eabe1b1c926efabb7fb21301227f7e58782a19c3efd035cca6946f", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:41553", "issue:42915", "issue:43257", "issue:43404", "issue:43742", "issue:43792", "issue:43827", "issue:43866", "issue:43872", "issue:43950", "issue:44220", "issue:44291", "issue:44509", "issue:44991", "issue:45020", "issue:45081", "issue:45399"], "result": {"analyst_result": {"best_issue_reason": "#44291 is the strongest representative issue overall because it is concrete, reproducible, and describes a specific code-path regression rather than a broad symptom or docs-only problem.", "best_pr_reason": null, "canonical_issue_reason": "Issue #44291 is the clearest canonical bug report: it names the exact failure mode, version scope, and underlying `_is_hf_initialized` argument regression, and it has a strong duplicate match in #43872.", "canonical_pr_reason": null, "confidence": 0.88, "soft_edge_verdicts": [{"accept": false, 
"left": "issue:44991", "reason": "One is a tokenizer-loading failure for a specific model; the other is an attention-kernel fallback gating problem. Different code paths and different failure classes.", "right": "issue:45399"}, {"accept": false, "left": "issue:43792", "reason": "Both involve Whisper/audio, but #43792 is a model-level run failure while #44220 is a specific feature-extraction function issue; not enough evidence they are the same bug.", "right": "issue:44220"}, {"accept": true, "left": "issue:43827", "reason": "Same docs regression: both report removed v5 pipeline tasks still being referenced in documentation.", "right": "issue:44509"}, {"accept": false, "left": "issue:45081", "reason": "Tokenizer regex patch crash vs flash-attn fallback gating; unrelated behavior and no shared underlying defect.", "right": "issue:45399"}, {"accept": false, "left": "issue:43404", "reason": "Tie-weights bug in Mistral3 generation model vs silent corruption of non-persistent buffers during from_pretrained; distinct problems.", "right": "issue:43950"}, {"accept": false, "left": "issue:42915", "reason": "Qwen3MoE FP8 config failure and broad remote_code breakage are not the same concrete bug.", "right": "issue:45020"}, {"accept": false, "left": "issue:43257", "reason": "Qwen3 MoE conversion/deepspeed issue and Ovis2 checkpoint corruption do not share the same underlying failure.", "right": "issue:43866"}, {"accept": false, "left": "issue:41553", "reason": "Both are tokenizer-related, but they concern different models and different symptoms; not clearly the same defect.", "right": "issue:44991"}, {"accept": false, "left": "issue:43742", "reason": "A key error loading one model is too specific to equate with the broad remote_code compatibility regression.", "right": "issue:45020"}, {"accept": false, "left": "issue:43742", "reason": "Model-loading KeyError and flash-attn fallback blocking are unrelated.", "right": "issue:45399"}, {"accept": true, "left": "issue:43872", 
"reason": "Both describe the same `_is_hf_initialized` TypeError regression when loading models with `init_empty_weights` / bitsandbytes-related initialization.", "right": "issue:44291"}], "summary": "Mostly a set of unrelated issues; only two pairs look like true duplicates: the v5 docs pipeline-task removals, and the `_is_hf_initialized` / `init_empty_weights` TypeError regression. The rest share broad subsystem vocabulary but not the same underlying bug."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44991|issue:45399", "issue:43792|issue:44220", "issue:43827|issue:44509", "issue:45081|issue:45399", "issue:43404|issue:43950", "issue:42915|issue:45020", "issue:43257|issue:43866", "issue:41553|issue:44991", "issue:43742|issue:45020", "issue:43742|issue:45399", "issue:43872|issue:44291"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7096, "estimated_input_tokens": 3420, "item_count": 18, "node_count": 18, "serialized_chars": 13678, "soft_pair_count": 11}, "cached_at": "2026-04-14T15:29:47Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "8ea18824f049dc2ddb2e3761ba4074fec6015bdf40074624de595431d068fd0d", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:24643", "issue:30064", "issue:42617", "issue:42915", "issue:43366", "issue:43531", "issue:43550", "issue:43645", "issue:43646", "issue:43824", "issue:43828", "issue:43950", "issue:43957", "issue:44560", "issue:44589", "issue:44910", "issue:44918", "issue:45325"], "result": {"analyst_result": {"best_issue_reason": "43645 is the strongest representative issue in this set only because it subsumes 43646; otherwise the cluster 
does not have a single coherent canonical issue.", "best_pr_reason": null, "canonical_issue_reason": "Issue 43645 is the best canonical among the only true duplicate pair: it is the more specific report and clearly describes the regression affecting custom model definition/initialization in Jupyter notebooks. The remaining issues are not the same underlying bug.", "canonical_pr_reason": null, "confidence": 0.95, "soft_edge_verdicts": [{"accept": false, "left": "issue:43366", "reason": "Different problems: GGUF architecture support vs a Phi-tiny-MoE dtype mismatch under autocast.", "right": "issue:43828"}, {"accept": false, "left": "issue:24643", "reason": "Unrelated domains: DeepSpeed training RuntimeError vs image processor handling of void segmentation maps.", "right": "issue:30064"}, {"accept": false, "left": "issue:43366", "reason": "Both mention Qwen, but one is GGUF gpt-oss support and the other is a sliding_window bug in Qwen3-MoE.", "right": "issue:43531"}, {"accept": false, "left": "issue:44560", "reason": "Both involve Qwen3.x, but one is a video StopIteration in Qwen3-vl-embedding and the other is TRL SFT unpacking input embeddings.", "right": "issue:44918"}, {"accept": false, "left": "issue:43366", "reason": "Different failure modes: model-architecture support request vs missing Float8 storage type during loading.", "right": "issue:44589"}, {"accept": false, "left": "issue:43950", "reason": "Different regressions: non-persistent buffer corruption in from_pretrained vs Qwen2.5-VL rope_index scaling for still images.", "right": "issue:45325"}, {"accept": false, "left": "issue:43550", "reason": "Both are attention/runtime crashes, but they affect different models and code paths (Bamba SDPA compile vs Qwen3.5 flash-attn position_ids handling).", "right": "issue:44910"}, {"accept": true, "left": "issue:43645", "reason": "Same underlying regression: Transformers 5.0.0 breaking custom model initialization; the notebook-specific wording in 43645 is a narrower 
version of the same bug.", "right": "issue:43646"}, {"accept": false, "left": "issue:43824", "reason": "Different root causes: missing Qwen2.5-VL import/export vs meta-device loading failures for other models.", "right": "issue:43957"}, {"accept": false, "left": "issue:42915", "reason": "Different issues: failure running 3d_parallel.py vs model loading with torch.device('meta').", "right": "issue:43957"}, {"accept": false, "left": "issue:42617", "reason": "Not the same problem: inability to run 3d_parallel.py vs GGUF support for gpt-oss.", "right": "issue:43366"}], "summary": "This cluster is mostly heterogeneous: the items cover unrelated bugs/features across Transformers, Qwen, vision, model loading, and training. I found one true duplicate pair (two reports of the same custom-model initialization regression in Transformers 5.0.0); the rest are only superficially similar by shared terms like Qwen, loading, or dtype/compile errors."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43366|issue:43828", "issue:24643|issue:30064", "issue:43366|issue:43531", "issue:44560|issue:44918", "issue:43366|issue:44589", "issue:43950|issue:45325", "issue:43550|issue:44910", "issue:43645|issue:43646", "issue:43824|issue:43957", "issue:42915|issue:43957", "issue:42617|issue:43366"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6980, "estimated_input_tokens": 3362, "item_count": 18, "node_count": 18, "serialized_chars": 13446, "soft_pair_count": 10}, "cached_at": "2026-04-14T15:30:21Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "87c164985ff411bc5edf391e46e6baeba168d484cc3023bf0456dedcff102528", 
"prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:36010", "issue:38175", "issue:39692", "issue:42915", "issue:43317", "issue:43596", "issue:43638", "issue:43644", "issue:43646", "issue:43950", "issue:43994", "issue:44534", "issue:44560", "issue:44661", "issue:44756", "issue:44805", "issue:44877", "issue:45030"], "result": {"analyst_result": {"best_issue_reason": "`issue:43950` is the most representative standalone issue in the cluster: it is specific, technically clear, and describes the underlying regression in a way that maps well to the strongest duplicate pair.", "best_pr_reason": null, "canonical_issue_reason": "Best canonical issue for the strongest duplicate subgroup is `issue:43950`: it precisely describes the non-persistent-buffer regression and cleanly covers the closely matching reports `issue:43644` and `issue:44534`. The rest of the items are separate bugs, so there is no single canonical issue for the entire cluster.", "canonical_pr_reason": null, "confidence": 0.86, "soft_edge_verdicts": [{"accept": true, "left": "issue:38175", "reason": "Same SigLIP2 symptom on the same model family; both report incorrect/degenerate outputs rather than different bugs.", "right": "issue:43994"}, {"accept": false, "left": "issue:44560", "reason": "Different models and different failures: a Qwen3-VL video StopIteration vs a generic mask-shape IndexError.", "right": "issue:44805"}, {"accept": false, "left": "issue:43646", "reason": "Related to Transformers 5 initialization regressions, but the concrete failures differ: custom model init vs non-persistent buffer corruption.", "right": "issue:43950"}, {"accept": false, "left": "issue:44877", "reason": "Both mention stricter config loading, but they target different model configs and do not clearly describe the same underlying bug.", "right": "issue:45030"}, {"accept": false, "left": "issue:36010", "reason": "Unrelated failures: an import error for GenerationMixin vs a Qwen3Moe FP8 loading issue.", "right": 
"issue:42915"}, {"accept": false, "left": "issue:43638", "reason": "Different code paths and symptoms: DeepSpeed ZeRO3/Bert index error vs add-new-model-like failing on TOKENIZER_MAPPING_NAMES.", "right": "issue:44661"}, {"accept": false, "left": "issue:43596", "reason": "Different failures and contexts; both involve model-loading internals, but not the same concrete bug.", "right": "issue:44661"}, {"accept": false, "left": "issue:43317", "reason": "Both are memory/device-loading related, but one is dequantized device_map offload and the other is mmap OOM on Strix Halo.", "right": "issue:44756"}, {"accept": true, "left": "issue:43644", "reason": "Near-identical regression description: both report non-persistent buffers being filled with junk in Transformers v5.", "right": "issue:44534"}, {"accept": false, "left": "issue:38175", "reason": "Same model family, but one is output quality/zero probabilities and the other is a documentation example with model/processor mismatch and quantization issues.", "right": "issue:39692"}], "summary": "This cluster is not one duplicate set; it splits into several unrelated themes. 
The only clear duplicate pairs are the SigLIP2 model behavior reports and the non-persistent-buffer regression reports."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:38175|issue:43994", "issue:44560|issue:44805", "issue:43646|issue:43950", "issue:44877|issue:45030", "issue:36010|issue:42915", "issue:43638|issue:44661", "issue:43596|issue:44661", "issue:43317|issue:44756", "issue:43644|issue:44534", "issue:38175|issue:39692"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6436, "estimated_input_tokens": 3090, "item_count": 17, "node_count": 17, "serialized_chars": 12359, "soft_pair_count": 9}, "cached_at": "2026-04-14T15:31:02Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "f74cc862a92b35cda19ffcca76e3fed20ddde72a08988fdcbb615be561bdc5e1", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:39692", "issue:41720", "issue:42915", "issue:43708", "issue:43856", "issue:43873", "issue:43994", "issue:44075", "issue:44451", "issue:44488", "issue:44960", "issue:44977", "issue:44991", "issue:45313", "issue:45357", "issue:45362", "issue:45406"], "result": {"analyst_result": {"best_issue_reason": "issue:43994 is the best single issue to anchor the cluster: it is concrete, model-specific, and captures the runtime symptom the duplicate appears to manifest as.", "best_pr_reason": null, "canonical_issue_reason": "issue:43994 is the cleaner canonical because it describes the end-user failure on the actual SigLIP2 model/pipeline path, while issue:39692 is more documentation-focused and broader in scope.", "canonical_pr_reason": null, "confidence": 0.79, "soft_edge_verdicts": [{"accept": 
false, "left": "issue:45313", "reason": "Different operations and failures: ZeRO-3 weight loading for language_model vs save_pretrained writing incorrect visual encoder keys.", "right": "issue:45357"}, {"accept": false, "left": "issue:45362", "reason": "Different models and code paths: Qwen3.5 chat crash vs Gemma4Processor missing _tokenizer in serve.", "right": "issue:45406"}, {"accept": false, "left": "issue:44960", "reason": "Different model families and symptoms: GLM5 issue vs Qwen3.5 flash-attention generation failure.", "right": "issue:44977"}, {"accept": false, "left": "issue:44451", "reason": "Both are model-loading complaints, but they target different repositories/models and no shared concrete bug is evident from the titles alone.", "right": "issue:44488"}, {"accept": false, "left": "issue:43708", "reason": "Unrelated Trainer checkpoint math bug vs SGD optimizer arguments not being applied.", "right": "issue:44075"}, {"accept": false, "left": "issue:44488", "reason": "Both involve loading models, but they are different models with distinct reported failures; not the same underlying bug.", "right": "issue:44991"}, {"accept": false, "left": "issue:43856", "reason": "Different problems in Qwen3 MoE: memory inefficiency during training vs quantization/offloading not behaving as expected.", "right": "issue:43873"}, {"accept": false, "left": "issue:41720", "reason": "Different model/config combinations and failure modes: auto device mapping cuda assert vs FineGrainedFP8Config failure.", "right": "issue:42915"}, {"accept": true, "left": "issue:39692", "reason": "Both point to the same SigLIP2 model/processor mismatch causing incorrect or nonsensical results; the docs example and the runtime symptom are the same underlying bug.", "right": "issue:43994"}], "summary": "Only the SigLIP2 pair looks like the same underlying bug; the other soft pairs span different models, configs, or code paths and should remain separate."}, "error_kind": null, "error_message": null, 
"evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:45313|issue:45357", "issue:45362|issue:45406", "issue:44960|issue:44977", "issue:44451|issue:44488", "issue:43708|issue:44075", "issue:44488|issue:44991", "issue:43856|issue:43873", "issue:41720|issue:42915", "issue:39692|issue:43994"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6964, "estimated_input_tokens": 3354, "item_count": 17, "node_count": 17, "serialized_chars": 13414, "soft_pair_count": 12}, "cached_at": "2026-04-14T15:32:24Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "efce02c1af39454ad2fc8c02c0f249fc92a3a9e4a3675b0594080d3b8f445742", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:42491", "issue:42915", "issue:43257", "issue:43404", "issue:43454", "issue:43644", "issue:43645", "issue:43720", "issue:43854", "issue:43856", "issue:43931", "issue:44387", "issue:44661", "issue:44683", "issue:44756", "issue:45276", "issue:45335"], "result": {"analyst_result": {"best_issue_reason": "45276 is the most actionable current issue among the set, with a clear symptom and active status; the other items are either closed, older, or narrower model-specific variants.", "best_pr_reason": null, "canonical_issue_reason": "45276 is the best representative issue in this set: it is open, concrete, and has the closest near-duplicate-style sibling (45335), while still describing a specific regression that can be triaged independently.", "canonical_pr_reason": null, "confidence": 0.9, "soft_edge_verdicts": [{"accept": false, "left": "issue:43854", "reason": "Both are model-loading failures, but they involve different models and different symptoms (GLM-4.7-Flash unit-test 
loading vs Qwen3-VL weight-shape mismatch).", "right": "issue:43931"}, {"accept": false, "left": "issue:44661", "reason": "These are unrelated bugs: one is about add-new-model-like handling TOKENIZER_MAPPING_NAMES, the other is compiled flex_attention failing on torch >= 2.9.", "right": "issue:44683"}, {"accept": false, "left": "issue:42915", "reason": "Qwen3Moe + FineGrainedFP8Config and Strix Halo mmap OOM are different root causes and different subsystems.", "right": "issue:44756"}, {"accept": false, "left": "issue:42491", "reason": "Both mention loading/accelerate, but one is a Qwen3 MoE LoRA hf4.x/hf5.x compatibility issue and the other is a BitNet packed-weight unpacking bug.", "right": "issue:43720"}, {"accept": false, "left": "issue:42491", "reason": "These are unrelated: Qwen3 MoE LoRA compatibility versus Gemma4 resize_token_embeddings propagation.", "right": "issue:45276"}, {"accept": false, "left": "issue:43257", "reason": "Different model-specific loading bugs: Qwen3 MoE weights not converted under accelerate+deepspeed vs GLM-4.7-Flash loading in tests.", "right": "issue:43854"}, {"accept": false, "left": "issue:43856", "reason": "Both are memory complaints, but one is Qwen3 MoE training inefficiency and the other is int4 quantization reserved-memory regression; not the same bug.", "right": "issue:44387"}, {"accept": false, "left": "issue:43644", "reason": "Both are Transformers 5.0 regressions, but one concerns non-persistent buffer initialization and the other Jupyter custom-model initialization; different failures.", "right": "issue:43645"}, {"accept": false, "left": "issue:42491", "reason": "Different model families and error modes: Qwen3 MoE LoRA incompatibility versus Qwen3-VL shape mismatch on load.", "right": "issue:43931"}, {"accept": false, "left": "issue:42491", "reason": "Loading a LoRA model after the hf4.x/hf5.x change is unrelated to Qwen3 MoE training memory usage.", "right": "issue:43856"}, {"accept": false, "left": "issue:45276", 
"reason": "Very similar symptom, but the affected submodules and model classes differ; this is not enough evidence for one shared concrete code-path bug.", "right": "issue:45335"}, {"accept": false, "left": "issue:43404", "reason": "Both are lm_head tying issues, but they affect different model implementations (Mistral3 vs AyaVision) and are not clearly the same underlying fix.", "right": "issue:43454"}], "summary": "This cluster is mostly a set of superficially similar Transformers issues that split into distinct model families and failure modes (loading, weight tying, memory, token resizing). I do not see any soft pair that is safe to merge as a duplicate."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43854|issue:43931", "issue:44661|issue:44683", "issue:42915|issue:44756", "issue:42491|issue:43720", "issue:42491|issue:45276", "issue:43257|issue:43854", "issue:43856|issue:44387", "issue:43644|issue:43645", "issue:42491|issue:43931", "issue:42491|issue:43856", "issue:45276|issue:45335", "issue:43404|issue:43454"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6476, "estimated_input_tokens": 3110, "item_count": 17, "node_count": 17, "serialized_chars": 12438, "soft_pair_count": 9}, "cached_at": "2026-04-14T15:32:53Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "dd65cfddce972b2e9caa26a2af1b3f0bdab87a6f6e924991755b12256576fec9", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:36010", "issue:39404", "issue:39692", "issue:41720", "issue:42947", "issue:43381", "issue:43425", "issue:43873", "issue:43957", "issue:44155", "issue:44164", "issue:44291", "issue:44387", 
"issue:44402", "issue:44488", "issue:44756", "issue:44960"], "result": {"analyst_result": {"best_issue_reason": "No issue is a strong global representative; the topics are too mixed to choose one canonical issue without losing important distinctions.", "best_pr_reason": null, "canonical_issue_reason": "No single canonical issue fits well because the cluster is heterogeneous and the soft-pair candidates do not describe the same underlying bug.", "canonical_pr_reason": null, "confidence": 0.93, "soft_edge_verdicts": [{"accept": false, "left": "issue:36010", "reason": "Import error for GenerationMixin and Whisper pipeline return_language regression are different code paths and failure modes.", "right": "issue:39404"}, {"accept": false, "left": "issue:44164", "reason": "One is about extra_state handling in save/from_pretrained; the other is an init_empty_weights argument mismatch during model loading.", "right": "issue:44291"}, {"accept": false, "left": "issue:44402", "reason": "Both involve tokenizer/model loading, but the concrete bugs differ: vocab-size token placement vs a specific model failing to load.", "right": "issue:44488"}, {"accept": false, "left": "issue:41720", "reason": "Qwen3 auto device mapping CUDA assert and AudioFlamingo3 batched token leakage are unrelated failures.", "right": "issue:44155"}, {"accept": false, "left": "issue:43873", "reason": "Quantization/offloading behavior and CUDA reserved-memory OOM under int4 are not the same concrete bug.", "right": "issue:44387"}, {"accept": false, "left": "issue:39692", "reason": "A SigLIP2 docs-example error and a GLM5 issue are clearly different artifacts and problem reports.", "right": "issue:44960"}, {"accept": false, "left": "issue:43425", "reason": "Torch 2.10 incompatibility is a version-compatibility issue, while the meta-device loading bug is a separate model-loading regression.", "right": "issue:43957"}, {"accept": false, "left": "issue:44387", "reason": "Both mention OOM, but one is CUDA 
reserved memory under int4 quantization and the other is mmap on Strix Halo; different root causes.", "right": "issue:44756"}, {"accept": false, "left": "issue:42947", "reason": "Gradient checkpointing ineffective with PEFT LoRA and checkpointing unusable in eval mode are distinct checkpointing problems.", "right": "issue:43381"}], "summary": "This cluster is not a duplicate set: the issues span unrelated areas (imports, model loading, quantization/offloading, gradient checkpointing, docs, and model-specific failures). All proposed soft edges should be rejected."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:36010|issue:39404", "issue:44164|issue:44291", "issue:44402|issue:44488", "issue:41720|issue:44155", "issue:43873|issue:44387", "issue:39692|issue:44960", "issue:43425|issue:43957", "issue:44387|issue:44756", "issue:42947|issue:43381"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6972, "estimated_input_tokens": 3358, "item_count": 18, "node_count": 18, "serialized_chars": 13430, "soft_pair_count": 10}, "cached_at": "2026-04-14T15:33:27Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "a5ed9c2ed383f0f648b6133fb5334776330388f9a22f5d1ecd38d8aa3b9a4226", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:43381", "issue:43646", "issue:43708", "issue:43854", "issue:43957", "issue:44164", "issue:44291", "issue:44292", "issue:44479", "issue:44743", "issue:44756", "issue:44877", "issue:44912", "issue:44928", "issue:44960", "issue:45216", "issue:45276", "issue:45292"], "result": {"analyst_result": {"best_issue_reason": "issue:45292 is the best representative of the only genuine 
duplicate family in the set, and its title is the least model-specific formulation of that bug.", "best_pr_reason": null, "canonical_issue_reason": "issue:45292 is the broader, more general report of the resize_token_embeddings/output_embeddings bug; issue:45276 appears to be a model-specific instance of the same underlying problem.", "canonical_pr_reason": null, "confidence": 0.91, "soft_edge_verdicts": [{"accept": false, "left": "issue:44292", "reason": "Both are model-loading/quantization themed, but one is about an unexpected constructor arg with init_empty_weights while the other is about MXFP4 loading falling back to bf16; different underlying failures.", "right": "issue:44912"}, {"accept": false, "left": "issue:43854", "reason": "GLM-4.7-Flash unit-test loading failure and Qwen3.5 save_pretrained regression are separate model-specific bugs.", "right": "issue:45216"}, {"accept": false, "left": "issue:44291", "reason": "Unexpected _is_hf_initialized arg during init_empty_weights is unrelated to disabling mmap to avoid OOM on Strix Halo.", "right": "issue:44756"}, {"accept": true, "left": "issue:45276", "reason": "These describe the same resize_token_embeddings bug affecting output embeddings/embed_tokens_per_layer; 45276 is the Gemma4-specific instance and 45292 is the broader report.", "right": "issue:45292"}, {"accept": false, "left": "issue:44743", "reason": "One is about recurrent states resetting with cache and seq_len>1; the other is a GLM5 issue with no clear overlap in code path.", "right": "issue:44960"}, {"accept": false, "left": "issue:43381", "reason": "Gradient checkpointing in eval mode and RLHF NaN from 3D position_ids/SDPA fallback are distinct training-time problems.", "right": "issue:44928"}, {"accept": false, "left": "issue:43957", "reason": "Meta-device loading breakage for some models is not the same as mmap/OOM behavior on Strix Halo.", "right": "issue:44756"}, {"accept": false, "left": "issue:44164", "reason": "save/from_pretrained 
extra_state handling is a serialization bug; disabling mmap for Strix Halo is a memory-loading workaround.", "right": "issue:44756"}, {"accept": false, "left": "issue:43646", "reason": "Custom model initialization breakage in Transformers 5.0.0 and incorrect max_steps calculation on resume_from_checkpoint are unrelated.", "right": "issue:43708"}, {"accept": false, "left": "issue:44479", "reason": "Video-input regression for Qwen VL models and strict config preventing granite_speech loading affect different subsystems and model families.", "right": "issue:44877"}], "summary": "This cluster is mostly a noisy mix of unrelated issue reports. The only clear duplicate pair is the Gemma/Qwen embedding resize bug (45276/45292); the rest describe different code paths and failure modes."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44292|issue:44912", "issue:43854|issue:45216", "issue:44291|issue:44756", "issue:45276|issue:45292", "issue:44743|issue:44960", "issue:43381|issue:44928", "issue:43957|issue:44756", "issue:44164|issue:44756", "issue:43646|issue:43708", "issue:44479|issue:44877"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6932, "estimated_input_tokens": 3338, "item_count": 18, "node_count": 18, "serialized_chars": 13350, "soft_pair_count": 10}, "cached_at": "2026-04-14T15:34:13Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "3c6cdff20fc3088c5f2797b2f6623a1c960b4a567de8cad5c4f6a4e409f1fe51", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:43257", "issue:43425", "issue:43645", "issue:43716", "issue:43828", "issue:43906", "issue:43957", "issue:44164", "issue:44265", 
"issue:44402", "issue:44451", "issue:44530", "issue:44589", "issue:44756", "issue:44863", "issue:44898", "issue:45292", "issue:45335"], "result": {"analyst_result": {"best_issue_reason": "Issue 45292 is the best cluster representative because it is the generic report of the resize_token_embeddings problem, with 45335 as a model-specific variant.", "best_pr_reason": null, "canonical_issue_reason": "Issue 45292 is the broadest, most representative version of the resize_token_embeddings bug and matches the only likely duplicate partner most directly.", "canonical_pr_reason": null, "confidence": 0.84, "soft_edge_verdicts": [{"accept": true, "left": "issue:45292", "reason": "Same underlying bug: resize_token_embeddings does not propagate to all tied/decoder embeddings; 45335 is a model-specific instance of the 45292 report.", "right": "issue:45335"}, {"accept": false, "left": "issue:44402", "reason": "Related to tokenizer/vocab sizing, but one is a specific model loading failure and the other is a general load failure; not clearly the same bug.", "right": "issue:44451"}, {"accept": false, "left": "issue:43645", "reason": "Custom-model Jupyter initialization breakage and save/from_pretrained extra_state handling are different code paths and failure modes.", "right": "issue:44164"}, {"accept": false, "left": "issue:43425", "reason": "Torch version incompatibility is a broad environment issue, while the Phi MoE report is a concrete autocast dtype mismatch.", "right": "issue:43828"}, {"accept": false, "left": "issue:44265", "reason": "torch.export + torch_compilable_check failure is unrelated to Perceiver interpolation at non-default resolutions.", "right": "issue:44898"}, {"accept": false, "left": "issue:43257", "reason": "Both involve model loading, but they target different models and different underlying conversion/checkpoint-loading problems.", "right": "issue:44863"}, {"accept": false, "left": "issue:43828", "reason": "Both mention loading-time type issues, but 
autocast dtype mismatch and meta-device loading breakage are distinct bugs.", "right": "issue:43957"}, {"accept": false, "left": "issue:44589", "reason": "Float8 storage lookup failure and mmap OOM on Strix Halo are unrelated.", "right": "issue:44756"}, {"accept": false, "left": "issue:43906", "reason": "An isolated reproduction of an older issue is not the same underlying bug as the Qwen3.5 PagedAttentionCache crash.", "right": "issue:44530"}, {"accept": false, "left": "issue:43425", "reason": "Torch 2.10 incompatibility and Mistral-3 image preprocessor/model dtype mismatch are different issues.", "right": "issue:43716"}], "summary": "This cluster is mostly heterogeneous: several unrelated loading/dtype/export issues are grouped together. The only strong duplicate-like pair is the resize_token_embeddings reports (45292/45335); the rest should stay separate."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:45292|issue:45335", "issue:44402|issue:44451", "issue:43645|issue:44164", "issue:43425|issue:43828", "issue:44265|issue:44898", "issue:43257|issue:44863", "issue:43828|issue:43957", "issue:44589|issue:44756", "issue:43906|issue:44530", "issue:43425|issue:43716"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6958, "estimated_input_tokens": 3351, "item_count": 18, "node_count": 18, "serialized_chars": 13402, "soft_pair_count": 10}, "cached_at": "2026-04-14T15:35:21Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "893feea7fb229183ad33fa2045cb5e48108359c2a9337c6056e5c82e549ed937", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:39404", "issue:43299", "issue:43425", 
"issue:43596", "issue:43611", "issue:43645", "issue:43901", "issue:43931", "issue:44479", "issue:44509", "issue:44530", "issue:44589", "issue:44683", "issue:44805", "issue:44863", "issue:44912", "issue:44918", "issue:45084"], "result": {"analyst_result": {"best_issue_reason": "issue:43611 is the best umbrella issue for this set because it is the most general loading-regression report, while the others are narrower or unrelated.", "best_pr_reason": null, "canonical_issue_reason": "issue:43611 is the broadest and most representative model-loading regression here; it captures a core v5 loading breakage rather than a model-specific symptom or documentation-only change.", "canonical_pr_reason": null, "confidence": 0.18, "soft_edge_verdicts": [{"accept": false, "left": "issue:39404", "reason": "Different pipelines and failure modes: Whisper return_language behavior vs MXFP4 quantization fallback on git-oss-20b.", "right": "issue:44912"}, {"accept": false, "left": "issue:43931", "reason": "Both are model-loading problems, but for different architectures and different symptoms; not the same concrete bug.", "right": "issue:44863"}, {"accept": false, "left": "issue:43611", "reason": "Both mention v5 model loading regressions, but one is about base_model_prefix and the other about custom model initialization in notebooks.", "right": "issue:43645"}, {"accept": false, "left": "issue:44530", "reason": "PagedAttentionCache linear_attention crash and non-template-node compilation errors are different code paths.", "right": "issue:45084"}, {"accept": false, "left": "issue:43901", "reason": "Both are docs issues, but they cover different pipeline docs and different removed/changed behaviors.", "right": "issue:44509"}, {"accept": false, "left": "issue:43299", "reason": "Same broad Qwen3-VL area, but different reported failures and model variants; too weak to treat as duplicates.", "right": "issue:43931"}, {"accept": false, "left": "issue:44479", "reason": "Both involve vision/video 
input handling, but one is a specific v5.3.0 regression on Qwen VL models and the other is a generic mask-shape IndexError.", "right": "issue:44805"}, {"accept": false, "left": "issue:44589", "reason": "Float8 storage lookup failure and compilation of non-template nodes are unrelated errors.", "right": "issue:45084"}, {"accept": false, "left": "issue:43596", "reason": "DeepSpeed zero3/BertModel initialization and torch>=2.9 flex_attention compilation are separate issues.", "right": "issue:44683"}, {"accept": false, "left": "issue:43425", "reason": "Torch 2.10 compatibility and Qwen3.5 embedding unpacking with TRL SFT are not the same bug or fix.", "right": "issue:44918"}], "summary": "This cluster is a loose collection of Transformers v5 regression reports, but they span different models, subsystems, and even docs-only issues. None of the soft pairs looks like the same underlying bug/change closely enough to merge as duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:39404|issue:44912", "issue:43931|issue:44863", "issue:43611|issue:43645", "issue:44530|issue:45084", "issue:43901|issue:44509", "issue:43299|issue:43931", "issue:44479|issue:44805", "issue:44589|issue:45084", "issue:43596|issue:44683", "issue:43425|issue:44918"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7142, "estimated_input_tokens": 3443, "item_count": 18, "node_count": 18, "serialized_chars": 13769, "soft_pair_count": 11}, "cached_at": "2026-04-14T15:36:27Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "709c0ebdc1c517fe128fe2827ac04a015329c537b09511acb3d4f698b300b96c", 
"prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:32090", "issue:36032", "issue:39404", "issue:40990", "issue:42947", "issue:43452", "issue:43582", "issue:43632", "issue:43856", "issue:43957", "issue:43994", "issue:44079", "issue:44164", "issue:44488", "issue:44671", "issue:44756", "issue:44991", "issue:45081"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "issue:43856", "reason": "Both mention memory, but one is a Qwen3 MoE training inefficiency while the other is an Apple/Strix Halo mmap OOM workaround. Different root causes and fixes.", "right": "issue:44756"}, {"accept": false, "left": "issue:43994", "reason": "Siglip2 nonsensical outputs with AutoModel/pipeline is a model/inference bug; ModelOutput key assignment with None is a generic container bug. Not the same code path.", "right": "issue:44079"}, {"accept": false, "left": "issue:40990", "reason": "High perplexity on gpt-oss-20b and ineffective gradient checkpointing with PEFT LoRA are unrelated symptoms, models, and mechanisms.", "right": "issue:42947"}, {"accept": false, "left": "issue:44671", "reason": "Both are tokenizer/model loading regressions in v5, but for different models and failures: CamemBERT MLM predictions vs est-roberta tokenizer load. 
Not mergeable as one fix.", "right": "issue:44991"}, {"accept": false, "left": "issue:36032", "reason": "T5 tokenizer method name conflict and gguf_file handling are distinct loading bugs with different failing components.", "right": "issue:43452"}, {"accept": false, "left": "issue:39404", "reason": "Whisper pipeline return_language regression and CamemBERT MLM misprediction are separate model-specific inference issues.", "right": "issue:44671"}, {"accept": false, "left": "issue:44991", "reason": "Both are tokenizer-load failures, but one is est-roberta in v5 and the other is a Mistral regex/backend_tokenizer crash. Different models and different fixes.", "right": "issue:45081"}, {"accept": false, "left": "issue:43582", "reason": "AppleSilicon warmup TypeError and the _is_hf_initialized v5 regression are unrelated internals despite both involving v5/runtime behavior.", "right": "issue:43632"}, {"accept": false, "left": "issue:44488", "reason": "Loading cjvt/sleng-bert and Mistral regex patch failure are different tokenizer-loading paths with no shared root cause.", "right": "issue:45081"}, {"accept": false, "left": "issue:43957", "reason": "Meta-device model loading breakage and save/from_pretrained extra_state handling are separate serialization/loading bugs.", "right": "issue:44164"}, {"accept": false, "left": "issue:32090", "reason": "Trainer GPU broadcast NoneType error and Whisper return_language pipeline regression are unrelated failures in different subsystems.", "right": "issue:39404"}], "summary": "This is not a true duplicate cluster: the issues span unrelated bugs across training, tokenizer loading, model inference, memory, and meta-device/save-load code paths. 
All soft links are superficial subsystem similarity rather than the same underlying defect."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43856|issue:44756", "issue:43994|issue:44079", "issue:40990|issue:42947", "issue:44671|issue:44991", "issue:36032|issue:43452", "issue:39404|issue:44671", "issue:44991|issue:45081", "issue:43582|issue:43632", "issue:44488|issue:45081", "issue:43957|issue:44164", "issue:32090|issue:39404"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6454, "estimated_input_tokens": 3099, "item_count": 17, "node_count": 17, "serialized_chars": 12396, "soft_pair_count": 9}, "cached_at": "2026-04-14T15:37:42Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "b3341290aded03392b654cf310a3c6b8a9b0628c183b72f8c8a053aa295531f4", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:40990", "issue:43421", "issue:43425", "issue:43611", "issue:43644", "issue:43716", "issue:43883", "issue:44164", "issue:44206", "issue:44220", "issue:44265", "issue:44479", "issue:44488", "issue:44610", "issue:44987", "issue:45084", "issue:45161"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.93, "soft_edge_verdicts": [{"accept": false, "left": "issue:40990", "reason": "Different components and symptoms: perplexity/eval quality on a model vs runtime special-token post-processor updates.", "right": "issue:43421"}, {"accept": false, "left": "issue:43425", "reason": "Torch version compatibility is a different failure mode from GPT-OSS MoE tensor-parallel loading.", "right": 
"issue:45161"}, {"accept": false, "left": "issue:43883", "reason": "Both are loading problems, but they concern different models and different errors, with no sign of the same code path.", "right": "issue:44488"}, {"accept": false, "left": "issue:44164", "reason": "`save/from_pretrained` extra_state handling is unrelated to the video-input regression in Qwen VL models.", "right": "issue:44479"}, {"accept": false, "left": "issue:44265", "reason": "`torch.export` compatibility for `torch_compilable_check` is a different bug from the OmDet processor image-size mismatch.", "right": "issue:44610"}, {"accept": false, "left": "issue:44488", "reason": "Both are model-loading failures, but they affect different models and appear to stem from different regressions.", "right": "issue:44987"}, {"accept": false, "left": "issue:43716", "reason": "Vision preprocessor/model dtype mismatch is unrelated to the compile-time non-template-node error.", "right": "issue:45084"}, {"accept": false, "left": "issue:44206", "reason": "Both touch audio feature extraction, but one is a specific LasrFeatureExtractor regression while the other is a generic helper-function issue; not enough evidence they are the same underlying bug.", "right": "issue:44220"}, {"accept": false, "left": "issue:43611", "reason": "Both mention Transformers 5.0.0, but the failures are distinct: `base_model_prefix` loading vs junk-filled non-persistent buffers.", "right": "issue:43644"}], "summary": "The cluster is mostly a set of unrelated issue reports spanning tokenizer/runtime behavior, model loading, audio feature extraction, export/compile, vision preprocessing, and release regressions. 
No pair looks like a clear duplicate on the available evidence, so all soft edges are rejected."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:40990|issue:43421", "issue:43425|issue:45161", "issue:43883|issue:44488", "issue:44164|issue:44479", "issue:44265|issue:44610", "issue:44488|issue:44987", "issue:43716|issue:45084", "issue:44206|issue:44220", "issue:43611|issue:43644"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6486, "estimated_input_tokens": 3115, "item_count": 17, "node_count": 17, "serialized_chars": 12459, "soft_pair_count": 9}, "cached_at": "2026-04-14T15:38:28Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "c03af2ae3a3df07f504918128c00149b51787aaca44ecb5b5c3a47e06a7ae73f", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:34567", "issue:40444", "issue:42947", "issue:43317", "issue:43452", "issue:43482", "issue:43541", "issue:43643", "issue:43644", "issue:43646", "issue:43883", "issue:44451", "issue:44530", "issue:44756", "issue:44987", "issue:44991", "issue:45084"], "result": {"analyst_result": {"best_issue_reason": "issue:43646 is the best representative only because it is a broad Transformers 5.0.0 regression affecting model initialization, which is the closest to a general cluster-level problem. 
It is still not a true duplicate hub.", "best_pr_reason": null, "canonical_issue_reason": "No single issue cleanly represents the cluster: the items span distinct failure modes (token counting, multimodal iterable datasets, GGUF loading, torch dynamo tracing, custom init, offload/OOM, etc.).", "canonical_pr_reason": null, "confidence": 0.86, "soft_edge_verdicts": [{"accept": false, "left": "issue:42947", "reason": "Different problems: PEFT/LoRA gradient checkpointing vs mmap/OOM on Strix Halo. Same broad training/runtime area, but not the same bug.", "right": "issue:44756"}, {"accept": false, "left": "issue:34567", "reason": "Trainer token counting not updating is unrelated to multimodal IterableDataset failures with multiple images per prompt.", "right": "issue:40444"}, {"accept": false, "left": "issue:43883", "reason": "Both are loading regressions, but one is a missing model attribute and the other is tokenizer loading for a specific model; no clear same code path.", "right": "issue:44991"}, {"accept": false, "left": "issue:43452", "reason": "Both mention GGUF/loading, but one is a generic gguf_file/from_pretrained issue and the other is a specific Qwen2.5-GGUF failure in v5. 
Too little evidence they are the same defect.", "right": "issue:43482"}, {"accept": false, "left": "issue:43541", "reason": "Torch dynamo tracing failure in Mixtral grouped_mm is a different compiler error from \"Can't compile non template nodes.\"", "right": "issue:45084"}, {"accept": false, "left": "issue:44451", "reason": "Both are model-loading regressions, but they affect different models with different symptoms; not enough to call them the same bug.", "right": "issue:44987"}, {"accept": false, "left": "issue:43643", "reason": "AutoConfig trust_remote_code missing fields is unrelated to PagedAttentionCache crashing on Qwen3.5 linear_attention group type.", "right": "issue:44530"}, {"accept": false, "left": "issue:43644", "reason": "Both are Transformers 5.0.0 regressions, but one corrupts non-persistent buffers and the other breaks custom model initialization; related theme, not the same bug.", "right": "issue:43646"}, {"accept": false, "left": "issue:43317", "reason": "device_map auto/offload loading failure is a different path from GGUF model loading failure.", "right": "issue:43482"}], "summary": "This cluster is mostly a grab-bag of unrelated Transformers regressions and model-specific loading bugs. 
The soft links are mostly broad theme similarity rather than the same underlying defect, so I would not merge them as duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:42947|issue:44756", "issue:34567|issue:40444", "issue:43883|issue:44991", "issue:43452|issue:43482", "issue:43541|issue:45084", "issue:44451|issue:44987", "issue:43643|issue:44530", "issue:43644|issue:43646", "issue:43317|issue:43482"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7080, "estimated_input_tokens": 3412, "item_count": 18, "node_count": 18, "serialized_chars": 13646, "soft_pair_count": 10}, "cached_at": "2026-04-14T15:39:18Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "8061595154210799c1482aad1dbf6bd06d71615f8f55c207c8a17fb0beaf4017", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:36032", "issue:39404", "issue:42915", "issue:43262", "issue:43377", "issue:43404", "issue:43645", "issue:43646", "issue:43825", "issue:44403", "issue:44466", "issue:44530", "issue:44534", "issue:44849", "issue:44987", "issue:44991", "issue:45081", "issue:45325"], "result": {"analyst_result": {"best_issue_reason": "43645 is the best single issue to anchor the cluster because it has the widest scope and aligns with the general theme of v5 breaking existing workflows.", "best_pr_reason": null, "canonical_issue_reason": "Issue 43645 is the broadest and most representative v5 regression report in the set, centered on custom model initialization rather than a model-specific edge case.", "canonical_pr_reason": null, "confidence": 0.33, "soft_edge_verdicts": [{"accept": false, "left": "issue:36032", "reason": 
"Tokenizer name/method conflict in T5 is unrelated to a Qwen3Moe FP8 configuration failure.", "right": "issue:42915"}, {"accept": false, "left": "issue:43262", "reason": "Different audio bugs: one is a chat-template sampling-rate default, the other is batched-vs-single output divergence from missing padding mask support.", "right": "issue:43377"}, {"accept": false, "left": "issue:44849", "reason": "Different model families and failure modes: Qwen3.5 hidden-state behavior vs Qwen2.5-VL rope index scaling.", "right": "issue:45325"}, {"accept": false, "left": "issue:39404", "reason": "Both involve pipelines, but one is a Whisper return_language regression while the other is an error-message wording/support claim about translation tasks in v5.", "right": "issue:43825"}, {"accept": false, "left": "issue:42915", "reason": "FP8 config failure for Qwen3Moe and PagedAttentionCache crash on Qwen3.5 are separate code-path issues.", "right": "issue:44530"}, {"accept": false, "left": "issue:44987", "reason": "Both are model-loading failures, but they target different models and likely different tokenizer/model initialization bugs.", "right": "issue:44991"}, {"accept": false, "left": "issue:36032", "reason": "A T5 tokenizer method conflict is not the same underlying bug as custom model initialization breaking in Jupyter notebooks.", "right": "issue:43645"}, {"accept": false, "left": "issue:43404", "reason": "Both mention lm_head/tied weights, but one is a model-specific untied-weight bug and the other is a device-dependent serialization inconsistency.", "right": "issue:44466"}, {"accept": false, "left": "issue:43646", "reason": "Custom model initialization breakage and junk-filled non-persistent buffers are distinct regressions.", "right": "issue:44534"}, {"accept": false, "left": "issue:44403", "reason": "Generic loading noise is not the same as a Mistral tokenizer crash caused by fix_mistral_regex.", "right": "issue:45081"}], "summary": "The cluster is mostly a loose 
collection of unrelated Transformers v5 regression reports, with only superficial overlap in topics like model loading, tokenizers, pipelines, and tied weights. None of the soft pairs look like the same underlying bug or change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:36032|issue:42915", "issue:43262|issue:43377", "issue:44849|issue:45325", "issue:39404|issue:43825", "issue:42915|issue:44530", "issue:44987|issue:44991", "issue:36032|issue:43645", "issue:43404|issue:44466", "issue:43646|issue:44534", "issue:44403|issue:45081"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6778, "estimated_input_tokens": 3261, "item_count": 18, "node_count": 18, "serialized_chars": 13042, "soft_pair_count": 9}, "cached_at": "2026-04-14T15:39:42Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "7079a115d2612c29cbe567c9beeec6fd2badc64fe414ba3bbebc816beb70058b", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:41553", "issue:42915", "issue:43322", "issue:43377", "issue:43540", "issue:43582", "issue:43645", "issue:43819", "issue:43994", "issue:44220", "issue:44451", "issue:44534", "issue:44610", "issue:44683", "issue:45081", "issue:45084", "issue:45216", "issue:45357"], "result": {"analyst_result": {"best_issue_reason": "Issue 45216 is the strongest canonical anchor because it states the regression cause and scope more broadly; the other issues are distinct model-specific or subsystem-specific failures.", "best_pr_reason": null, "canonical_issue_reason": "Issue 45216 is the better canonical issue: it describes the broader regression in `save_pretrained` starting at 5.4.0, while 45357 
reads as a later, more specific manifestation of the same Qwen3.5 visual-encoder key serialization bug.", "canonical_pr_reason": null, "confidence": 0.89, "soft_edge_verdicts": [{"accept": false, "left": "issue:43994", "reason": "Both are vision-model output problems, but they affect different models and different failure modes (SigLIP2 nonsense outputs vs OmDet-Turbo processor/model size mismatch).", "right": "issue:44610"}, {"accept": false, "left": "issue:44451", "reason": "Both are loading/tokenizer-related, but one is a ScandiBERT load failure and the other is a Mistral regex patch crash with a different root cause and code path.", "right": "issue:45081"}, {"accept": false, "left": "issue:43322", "reason": "Both involve multimodal models, but the symptoms and code paths differ substantially: Llava Next segfault vs Qwen3OmniMoe video-input ValueError.", "right": "issue:43540"}, {"accept": true, "left": "issue:45216", "reason": "These describe the same Qwen3.5 `save_pretrained` regression: incorrect serialized checkpoint/visual-encoder keys, with 45357 appearing to be a later version-specific report of the same bug.", "right": "issue:45357"}, {"accept": false, "left": "issue:43645", "reason": "Both mention Transformers v5 regressions, but one is about custom model initialization in Jupyter and the other about non-persistent buffers being filled with junk; not the same bug.", "right": "issue:44534"}, {"accept": false, "left": "issue:44220", "reason": "Different subsystems and failures: `_torch_extract_fbank_features()` in audio preprocessing versus compiled `flex_attention` failing on newer torch.", "right": "issue:44683"}, {"accept": false, "left": "issue:43377", "reason": "Both concern output mismatches, but they are different models and different missing mechanisms (padding mask support vs missing STE), so not the same underlying bug.", "right": "issue:43819"}, {"accept": false, "left": "issue:41553", "reason": "Completely unrelated: a bad AutoTokenizer error 
message for Voxtral versus a TypeError in Apple Silicon allocator warmup.", "right": "issue:43582"}, {"accept": false, "left": "issue:42915", "reason": "No meaningful overlap in code path or failure mode: Qwen3Moe FP8 config failure versus a template-node compilation TypeError.", "right": "issue:45084"}], "summary": "The set is mostly a collection of unrelated bug reports. The only clear duplicate-like pair is the Qwen3.5 `save_pretrained` regression reports, which describe the same checkpoint serialization problem across versions."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43994|issue:44610", "issue:44451|issue:45081", "issue:43322|issue:43540", "issue:45216|issue:45357", "issue:43645|issue:44534", "issue:44220|issue:44683", "issue:43377|issue:43819", "issue:41553|issue:43582", "issue:42915|issue:45084"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6602, "estimated_input_tokens": 3173, "item_count": 17, "node_count": 17, "serialized_chars": 12692, "soft_pair_count": 9}, "cached_at": "2026-04-14T15:40:18Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "24499572cab2e8610e11abfd56858ab3d2d0a167d29486cce8e425782decb717", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:40990", "issue:43296", "issue:43317", "issue:43404", "issue:43531", "issue:43632", "issue:43856", "issue:44062", "issue:44265", "issue:44479", "issue:44756", "issue:44811", "issue:44987", "issue:45072", "issue:45081", "issue:45127", "issue:45325"], "result": {"analyst_result": {"best_issue_reason": "No single issue is a good global representative; the set is too heterogeneous to consolidate around one 
report.", "best_pr_reason": null, "canonical_issue_reason": "No canonical issue: these are distinct bugs spanning unrelated models, tokenizer paths, export/inference paths, and memory/offload behaviors.", "canonical_pr_reason": null, "confidence": 0.95, "soft_edge_verdicts": [{"accept": false, "left": "issue:43632", "reason": "Different problems: one is an `_is_hf_initialized` flag regression, the other is mmap-related OOM on Strix Halo.", "right": "issue:44756"}, {"accept": false, "left": "issue:44987", "reason": "Both involve loading, but one is a model load failure for `physical-intelligence/fast` and the other is a Mistral tokenizer regex patch crash; different code paths.", "right": "issue:45081"}, {"accept": false, "left": "issue:43296", "reason": "Unrelated failures: PaddleOCR-VL loading vs Qwen3-MoE sliding_window behavior.", "right": "issue:43531"}, {"accept": false, "left": "issue:43404", "reason": "Both mention tied weights, but one is a missing `lm_head` tie in Mistral3 multimodal generation and the other is LoRA merge collapse with extended vocabulary; not the same bug.", "right": "issue:45127"}, {"accept": false, "left": "issue:44062", "reason": "`AddedToken(special=...)` argument collision is a tokenizer API error, not the same as the `physical-intelligence/fast` load regression.", "right": "issue:44987"}, {"accept": false, "left": "issue:44479", "reason": "Both are Qwen VL-related, but one is a video-input regression and the other is still-image RoPE/position-id scaling; different functionality and fixes.", "right": "issue:45325"}, {"accept": false, "left": "issue:40990", "reason": "Perplexity on gpt-oss-20b and Whisper `batch_decode()` skipping special tokens are unrelated issues.", "right": "issue:44811"}, {"accept": false, "left": "issue:44265", "reason": "`torch.export.export` with `torch_compilable_check` is a compile/export bug, while the other is a bfloat16 dtype mismatch in inference.", "right": "issue:45072"}, {"accept": false, "left": 
"issue:43317", "reason": "GPU+CPU offload load failure for dequantized models and Qwen3 MoE training memory inefficiency are different resource-management problems.", "right": "issue:43856"}], "summary": "The soft-similarity pairs are mostly superficial matches: same broad library or model family, but different bug mechanisms and different code paths. I do not see any true duplicate pairs here."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43632|issue:44756", "issue:44987|issue:45081", "issue:43296|issue:43531", "issue:43404|issue:45127", "issue:44062|issue:44987", "issue:44479|issue:45325", "issue:40990|issue:44811", "issue:44265|issue:45072", "issue:43317|issue:43856"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6996, "estimated_input_tokens": 3370, "item_count": 18, "node_count": 18, "serialized_chars": 13479, "soft_pair_count": 10}, "cached_at": "2026-04-14T15:41:06Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "474bc74dca844e92fa881a40e202812694b96a541b68ca365aec1dbdf82ac933", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:36032", "issue:40444", "issue:42915", "issue:43421", "issue:43531", "issue:43611", "issue:43632", "issue:43742", "issue:43818", "issue:44291", "issue:44534", "issue:44568", "issue:44589", "issue:44683", "issue:44877", "issue:45020", "issue:45325", "issue:45357"], "result": {"analyst_result": {"best_issue_reason": "issue:45020 is the strongest standalone issue to keep as the general tracker: it is open, broad in scope, and captures a current transformer-version regression pattern.", "best_pr_reason": null, "canonical_issue_reason": 
"issue:45020 is the broadest umbrella regression report in the set and the best single anchor if one issue must represent the cluster.", "canonical_pr_reason": null, "confidence": 0.89, "soft_edge_verdicts": [{"accept": false, "left": "issue:42915", "reason": "Different failures: Qwen3MoE + FineGrainedFP8Config versus init_empty_weights/_is_hf_initialized loading error. Same general area, not the same bug.", "right": "issue:44291"}, {"accept": false, "left": "issue:43611", "reason": "Both mention Transformers v5 regressions, but one is about base_model_prefix loading and the other about non-persistent buffers being filled incorrectly. Distinct code paths.", "right": "issue:44534"}, {"accept": false, "left": "issue:43421", "reason": "Both involve special tokens, but one is runtime post-processor synchronization and the other is add_special_tokens not adding BOS/EOS for a specific tokenizer. Not the same issue.", "right": "issue:44568"}, {"accept": false, "left": "issue:42915", "reason": "Unrelated problems: FP8 config failure for Qwen3MoE versus a Float8 storage lookup TypeError.", "right": "issue:44589"}, {"accept": false, "left": "issue:43632", "reason": "Different regressions: _is_hf_initialized flag handling versus junk-filled non-persistent buffers.", "right": "issue:44534"}, {"accept": false, "left": "issue:36032", "reason": "T5Tokenizer add_special_tokens method conflict is unrelated to a MobileLLM-125M key error when loading a model.", "right": "issue:43742"}, {"accept": false, "left": "issue:40444", "reason": "Both are Qwen2.5-VL related, but one is an iterable-dataset multi-image training failure and the other is a still-image rope index scaling bug. 
Different concrete behavior.", "right": "issue:45325"}, {"accept": false, "left": "issue:43818", "reason": "Different models and different bugs: Video-LLaVA tower/temporal-attention issue versus Qwen3.5 save_pretrained key serialization.", "right": "issue:45357"}, {"accept": false, "left": "issue:43531", "reason": "Qwen3-MoE sliding_window behavior and granite_speech strict-config loading are separate issues.", "right": "issue:44877"}, {"accept": false, "left": "issue:44683", "reason": "Torch 2.9 flex_attention compilation failure is unrelated to remote_code model-loading breakages in recent Transformers.", "right": "issue:45020"}], "summary": "These items are mostly unrelated bug reports spanning tokenizers, model loading, multimodal models, attention, and config regressions. None of the suggested soft pairs look like the same underlying bug or change, so I would not merge any of them as duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:42915|issue:44291", "issue:43611|issue:44534", "issue:43421|issue:44568", "issue:42915|issue:44589", "issue:43632|issue:44534", "issue:36032|issue:43742", "issue:40444|issue:45325", "issue:43818|issue:45357", "issue:43531|issue:44877", "issue:44683|issue:45020"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6852, "estimated_input_tokens": 3298, "item_count": 18, "node_count": 18, "serialized_chars": 13190, "soft_pair_count": 10}, "cached_at": "2026-04-14T15:41:38Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "318e679cc0efc29bdec87070bbdd3a522868ed3b2c29db9ef1ee973c0006151f", "prepared_review_unit_schema_version": "1.0"}, "nodes": 
["issue:33666", "issue:35707", "issue:36032", "issue:40444", "issue:41720", "issue:43425", "issue:43482", "issue:43525", "issue:43792", "issue:44079", "issue:44186", "issue:44265", "issue:44403", "issue:44534", "issue:44589", "issue:44821", "issue:44991", "issue:45084"], "result": {"analyst_result": {"best_issue_reason": "Issue #44403 is the best single issue candidate because it is open, broadly scoped, and centrally about transformer loading behavior rather than a model-specific or environment-specific failure.", "best_pr_reason": null, "canonical_issue_reason": "If one issue must represent this set, issue #44403 is the broadest and most generic report, with active discussion and a title that best fits a catch-all transformer-loading problem. That said, the overall set is not a duplicate cluster.", "canonical_pr_reason": null, "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "issue:44821", "reason": "Different failures: loading AutoImageProcessor from a URL versus a compilation error about non-template nodes. No shared code path or concrete bug.", "right": "issue:45084"}, {"accept": false, "left": "issue:43425", "reason": "Torch 2.10 incompatibility is an environment/version issue; 44403 is about noisy loading logs. Related broad area, but not the same bug.", "right": "issue:44403"}, {"accept": false, "left": "issue:36032", "reason": "T5 tokenizer attribute collision and Qwen3 auto device-map CUDA assert are unrelated model/runtime failures.", "right": "issue:41720"}, {"accept": false, "left": "issue:43792", "reason": "Whisper loading/runtime failure versus missing Float8 storage type. Different error surfaces and likely different root causes.", "right": "issue:44589"}, {"accept": false, "left": "issue:44186", "reason": "Tokenizer crash on NER/padding versus torch.export failure with torch_compilable_check. 
Same ecosystem, but distinct code paths and symptoms.", "right": "issue:44265"}, {"accept": false, "left": "issue:44403", "reason": "Both involve loading, but one is about unnecessary warning noise and the other is a hard tokenizer-loading failure for a specific model.", "right": "issue:44991"}, {"accept": false, "left": "issue:33666", "reason": "Both concern Qwen VL training, but one is multi-GPU training and the other is IterableDataset/multi-image finetuning. Not the same concrete bug.", "right": "issue:40444"}, {"accept": false, "left": "issue:35707", "reason": "Progressive generation with inputs_embeds/past_key_values is unrelated to finetuning Qwen2.5-VL with multi-image IterableDataset input.", "right": "issue:40444"}, {"accept": false, "left": "issue:44079", "reason": "ModelOutput key handling bug and non-persistent buffer junk are separate internal behaviors with different affected code paths.", "right": "issue:44534"}, {"accept": false, "left": "issue:43482", "reason": "Both are version/config compatibility problems, but one is GGUF loading in transformers v5 and the other is a missing pad_token_id on Llama4Config. Different underlying defects.", "right": "issue:43525"}], "summary": "These items are a grab-bag of unrelated bug reports across different models and subsystems. 
None of the soft pairs look like the same underlying issue, so no duplicate merges are justified."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44821|issue:45084", "issue:43425|issue:44403", "issue:36032|issue:41720", "issue:43792|issue:44589", "issue:44186|issue:44265", "issue:44403|issue:44991", "issue:33666|issue:40444", "issue:35707|issue:40444", "issue:44079|issue:44534", "issue:43482|issue:43525"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7138, "estimated_input_tokens": 3441, "item_count": 18, "node_count": 18, "serialized_chars": 13764, "soft_pair_count": 11}, "cached_at": "2026-04-14T15:42:08Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "bbfbf90064c316ecc9a8ebde0ed37c958917e61de62912025e13f69c01d990ec", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:40444", "issue:41720", "issue:42175", "issue:43296", "issue:43531", "issue:43606", "issue:43644", "issue:43749", "issue:43761", "issue:43819", "issue:43881", "issue:44079", "issue:44265", "issue:44291", "issue:44488", "issue:44589", "issue:45081", "issue:45381"], "result": {"analyst_result": {"best_issue_reason": "issue:43749 is the best issue to keep as the cluster representative because it describes a concrete, widely impactful training/loading regression rather than a narrow model-specific failure.", "best_pr_reason": null, "canonical_issue_reason": "issue:43749 is the most broadly scoped framework regression in the set and has strong discussion/inbound activity, making it the best single anchor if one issue must represent the cluster.", "canonical_pr_reason": null, "confidence": 0.18, 
"soft_edge_verdicts": [{"accept": false, "left": "issue:44291", "reason": "Both are loading/type-related, but one is an init_empty_weights argument regression and the other is a missing Float8 storage type; different failure modes and likely different fixes.", "right": "issue:44589"}, {"accept": false, "left": "issue:43606", "reason": "CPU offload device mismatch in a model load path is unrelated to torch.export failing on torch_compilable_check; different code paths and remedies.", "right": "issue:44265"}, {"accept": false, "left": "issue:43606", "reason": "Bark CPU-offload device mismatch does not match DAC.from_latents vs forward mismatch; no concrete shared bug.", "right": "issue:43819"}, {"accept": false, "left": "issue:43749", "reason": "FSDP CPU-efficient loading and ModelOutput key assignment are separate framework behaviors with no common underlying defect.", "right": "issue:44079"}, {"accept": false, "left": "issue:43761", "reason": "CLIPVisionModel hidden_states regression is a model forward bug, not the same as ModelOutput key bookkeeping.", "right": "issue:44079"}, {"accept": false, "left": "issue:43644", "reason": "Non-persistent buffer initialization junk is unrelated to ModelOutput key assignment.", "right": "issue:44079"}, {"accept": false, "left": "issue:42175", "reason": "Missing TensorFlow in a pip extra is a packaging/dependency issue, while PaddleOCR-VL load failure in vLLM is a model/runtime compatibility issue.", "right": "issue:43296"}, {"accept": false, "left": "issue:43881", "reason": "These are different model load failures for different models with no evidence of the same root cause.", "right": "issue:44488"}, {"accept": false, "left": "issue:40444", "reason": "Both mention Qwen2.5-VL, but one is finetuning with multiple images per prompt and the other is video vision_position_ids; the failure surfaces differ and the root cause is not established as the same.", "right": "issue:45381"}, {"accept": false, "left": "issue:41720", "reason": 
"Qwen3 auto device mapping cudaErrorAssert is unrelated to a Mistral tokenizer regex patch crash.", "right": "issue:45081"}, {"accept": false, "left": "issue:43531", "reason": "A sliding_window issue in Qwen3-MoE is not the same bug as an init_empty_weights unexpected-argument TypeError.", "right": "issue:44291"}], "summary": "This cluster is highly over-broad: the paired issues span unrelated models, bugs, and subsystems, so none of the soft edges look like true duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44291|issue:44589", "issue:43606|issue:44265", "issue:43606|issue:43819", "issue:43749|issue:44079", "issue:43761|issue:44079", "issue:43644|issue:44079", "issue:42175|issue:43296", "issue:43881|issue:44488", "issue:40444|issue:45381", "issue:41720|issue:45081", "issue:43531|issue:44291"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6512, "estimated_input_tokens": 3128, "item_count": 17, "node_count": 17, "serialized_chars": 12512, "soft_pair_count": 9}, "cached_at": "2026-04-14T15:43:19Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "0375d673267430e4e0f91309e0106286437139f10b052b708c4a899bff964452", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:42915", "issue:43454", "issue:43482", "issue:43493", "issue:43611", "issue:43632", "issue:43646", "issue:43828", "issue:43994", "issue:44387", "issue:44466", "issue:44610", "issue:44617", "issue:44683", "issue:45072", "issue:45325", "issue:45381"], "result": {"analyst_result": {"best_issue_reason": "43493 is the strongest representative issue: it is root-cause oriented, specific to the underlying SigLIP2 
bug, and can subsume 43994's concrete user-facing failure.", "best_pr_reason": null, "canonical_issue_reason": "No single cluster-wide duplicate is obvious, but 43493 is the best anchor for the only plausible duplicate pair because it describes the underlying SigLIP2 implementation discrepancy rather than just the downstream bad-output symptom.", "canonical_pr_reason": null, "confidence": 0.67, "soft_edge_verdicts": [{"accept": false, "left": "issue:44610", "reason": "Different bugs and code paths: OmDet-Turbo input-size mismatch vs Qwen2.5-VL rope/position-id scaling.", "right": "issue:45325"}, {"accept": false, "left": "issue:45325", "reason": "Same model family, but one targets still-image temporal positions and the other video vision_position_ids; not clearly the same bug.", "right": "issue:45381"}, {"accept": false, "left": "issue:43828", "reason": "Autocast dtype mismatch on Phi-tiny-MoE is unrelated to compiled flex_attention breaking on torch >= 2.9.", "right": "issue:44683"}, {"accept": false, "left": "issue:44387", "reason": "Both are OOMs, but one is an int4 memory regression and the other is a model-specific Sam3Video OOM.", "right": "issue:44617"}, {"accept": false, "left": "issue:43611", "reason": "Both relate to Transformers v5 initialization, but the concrete failures differ: base_model_prefix loading vs custom model initialization.", "right": "issue:43646"}, {"accept": false, "left": "issue:43632", "reason": "Different regressions: _is_hf_initialized handling vs bf16 dtype mismatches in SwitchTransformers/TimmWrapperModel.", "right": "issue:45072"}, {"accept": false, "left": "issue:42915", "reason": "Different model/loading failures: Qwen3Moe FineGrainedFP8Config vs Qwen2.5-GGUF loading on v5.", "right": "issue:43482"}, {"accept": true, "left": "issue:43493", "reason": "Both point to the same SigLIP2 correctness problem; 43994 looks like a concrete symptom of the HF-vs-JAX implementation discrepancy in 43493.", "right": "issue:43994"}, {"accept": 
false, "left": "issue:43454", "reason": "Both mention tied lm_head weights, but one is a model-specific tie failure and the other is a serialization inconsistency; not the same concrete bug.", "right": "issue:44466"}], "summary": "Mostly a heterogeneous set of unrelated bug reports. Only the two SigLIP2 issues look like true duplicates; the rest are distinct regressions across different models/code paths."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44610|issue:45325", "issue:45325|issue:45381", "issue:43828|issue:44683", "issue:44387|issue:44617", "issue:43611|issue:43646", "issue:43632|issue:45072", "issue:42915|issue:43482", "issue:43493|issue:43994", "issue:43454|issue:44466"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6872, "estimated_input_tokens": 3308, "item_count": 18, "node_count": 18, "serialized_chars": 13230, "soft_pair_count": 9}, "cached_at": "2026-04-14T15:44:20Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "71e27b8168f257d2b84e183d502a0edff03cd47abba3c976c9764d630b50a116", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:34634", "issue:35707", "issue:43262", "issue:43317", "issue:43526", "issue:43746", "issue:43792", "issue:43873", "issue:43881", "issue:43883", "issue:43957", "issue:44186", "issue:44291", "issue:44877", "issue:44898", "issue:44991", "issue:45072", "issue:45325"], "result": {"analyst_result": {"best_issue_reason": "#43873 is the most suitable triage anchor because it is open, broadly framed, and closest to the only semi-related offloading pair.", "best_pr_reason": null, "canonical_issue_reason": "If a single anchor is needed, 
#43873 is the broadest and most actively discussed open report, but the set does not form a true duplicate cluster.", "canonical_pr_reason": null, "confidence": 0.93, "soft_edge_verdicts": [{"accept": false, "left": "issue:43792", "reason": "Both are load/runtime failures, but one is a Whisper model-specific run failure and the other is a meta-device loading regression affecting different models; not the same bug.", "right": "issue:43957"}, {"accept": false, "left": "issue:43526", "reason": "Completely different areas: BeitImageProcessor label reduction vs Molmo tied-weights attribute error. No shared code path.", "right": "issue:43883"}, {"accept": false, "left": "issue:44186", "reason": "Tokenizer NER/padding crash and bfloat16 dtype mismatch in different model wrappers are unrelated failures.", "right": "issue:45072"}, {"accept": false, "left": "issue:43881", "reason": "Different models and different symptoms: glm-4v-9b loading failure vs tokenizer loading regression for EMBEDDIA/est-roberta.", "right": "issue:44991"}, {"accept": false, "left": "issue:43317", "reason": "Both involve offloading/quantization, but one is a dequantized-model load failure with device_map=auto and the other is a broader offloading behavior complaint; too generic to call the same bug.", "right": "issue:43873"}, {"accept": false, "left": "issue:43262", "reason": "Audio sampling-rate defaulting in apply_chat_template is unrelated to GraniteSpeech PEFT checkpoint loading.", "right": "issue:43746"}, {"accept": false, "left": "issue:44898", "reason": "Both touch vision/position handling, but the Perceiver resolution bug and Qwen2.5-VL rope-index scaling issue are different model-specific code paths.", "right": "issue:45325"}, {"accept": false, "left": "issue:44291", "reason": "init_empty_weights/_is_hf_initialized constructor mismatch is a different loading breakage from strict config rejection for granite_speech.", "right": "issue:44877"}, {"accept": false, "left": "issue:34634", "reason": 
"BarkProcessor voice_preset handling and progressive generation with inputs_embeds/past_key_values are unrelated issues.", "right": "issue:35707"}], "summary": "All suggested links are superficial text similarity, not the same underlying bug. The only near-theme match is the offloading/quantization pair, but it still looks like different failure modes, so every soft edge should be rejected."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43792|issue:43957", "issue:43526|issue:43883", "issue:44186|issue:45072", "issue:43881|issue:44991", "issue:43317|issue:43873", "issue:43262|issue:43746", "issue:44898|issue:45325", "issue:44291|issue:44877", "issue:34634|issue:35707"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7062, "estimated_input_tokens": 3403, "item_count": 18, "node_count": 18, "serialized_chars": 13611, "soft_pair_count": 11}, "cached_at": "2026-04-14T15:44:57Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "17a2d9fccd6ed3eb4ab867299d78e1be80de7ae54643874840fbc5a58f99f2de", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:35707", "issue:39692", "issue:40444", "issue:41720", "issue:43296", "issue:43377", "issue:43550", "issue:43632", "issue:43720", "issue:43749", "issue:43792", "issue:44164", "issue:44534", "issue:44610", "issue:44683", "issue:44877", "issue:44898", "issue:44991"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "issue:43550", "reason": "Different failures: torch.compile+SDPA in 
Bamba-9B-v2 vs accelerate loading of packed BitNet weights.", "right": "issue:43720"}, {"accept": false, "left": "issue:43632", "reason": "Both are v5 regressions, but one is `_is_hf_initialized` and the other is `extra_state` handling; different code paths and symptoms.", "right": "issue:44164"}, {"accept": false, "left": "issue:35707", "reason": "Unrelated problems: generation with `inputs_embeds/past_key_values` vs Qwen3 auto device-map CUDA assert.", "right": "issue:41720"}, {"accept": false, "left": "issue:44610", "reason": "Both involve image resolution mismatches, but they affect different models and different preprocessing/model expectations.", "right": "issue:44898"}, {"accept": false, "left": "issue:43296", "reason": "Different models and loaders: PaddleOCR-VL/vLLM load failure vs tokenizer loading for EMBEDDIA/est-roberta in transformers v5.", "right": "issue:44991"}, {"accept": false, "left": "issue:43296", "reason": "Different causes: PaddleOCR-VL load regression vs strict config rejecting `granite_speech`.", "right": "issue:44877"}, {"accept": false, "left": "issue:43377", "reason": "MIMI batching/padding-mask bug is unrelated to OmDet-Turbo processor output-size mismatch.", "right": "issue:44610"}, {"accept": false, "left": "issue:43377", "reason": "Different models and bugs: MIMI batch-vs-single inconsistency vs Perceiver resolution/interpolation failure.", "right": "issue:44898"}, {"accept": false, "left": "issue:43792", "reason": "Whisper model runtime failure and compiled flex_attention on torch>=2.9 are separate issues.", "right": "issue:44683"}, {"accept": false, "left": "issue:43749", "reason": "FSDP CPU RAM efficient loading breakage is not the same as non-persistent buffers being filled with junk.", "right": "issue:44534"}, {"accept": false, "left": "issue:39692", "reason": "SigLIP2 doc-example errors and Qwen2.5-VL iterable-dataset multi-image finetuning failure are different multimodal issues.", "right": "issue:40444"}], "summary": "These 
items are not duplicates of one another; they are a loose cluster of unrelated Transformers/model-loading bugs connected only by superficial similarity in wording."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43550|issue:43720", "issue:43632|issue:44164", "issue:35707|issue:41720", "issue:44610|issue:44898", "issue:43296|issue:44991", "issue:43296|issue:44877", "issue:43377|issue:44610", "issue:43377|issue:44898", "issue:43792|issue:44683", "issue:43749|issue:44534", "issue:39692|issue:40444"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6910, "estimated_input_tokens": 3327, "item_count": 17, "node_count": 17, "serialized_chars": 13308, "soft_pair_count": 11}, "cached_at": "2026-04-14T15:45:35Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "9f789b58a05c74c971a3b83bee4f49f1d4de704af9d3c0b80b61a58c672c564c", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:41720", "issue:43322", "issue:43404", "issue:43454", "issue:43526", "issue:43550", "issue:43606", "issue:43720", "issue:43881", "issue:44186", "issue:44423", "issue:44466", "issue:44610", "issue:44977", "issue:45072", "issue:45357", "issue:45406"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.95, "soft_edge_verdicts": [{"accept": false, "left": "issue:43550", "reason": "Both are runtime failures, but one is torch.compile/SDPA on Bamba and the other is CPU offload device mismatch on bark-small; different code paths and failure modes.", "right": "issue:43606"}, {"accept": false, "left": "issue:44466", 
"reason": "Both concern saving/tied weights in v5, but 44466 is generic device-dependent lm_head serialization while 45357 is Qwen3.5 visual encoder key regression; not the same bug.", "right": "issue:45357"}, {"accept": false, "left": "issue:43454", "reason": "43454 is missing lm_head weight tying in a multimodal model; 45072 is dtype mismatch in bfloat16 inference for different models. Too different to merge.", "right": "issue:45072"}, {"accept": false, "left": "issue:43454", "reason": "One is weight tying/text generation corruption; the other is compile-time SDPA failure. No shared concrete code-path bug.", "right": "issue:43550"}, {"accept": false, "left": "issue:44423", "reason": "Both crash in serve with multimodal processors, but the errors differ: string `.to()` misuse vs missing `_tokenizer` on Gemma4Processor. Different root causes.", "right": "issue:45406"}, {"accept": false, "left": "issue:44186", "reason": "Both are processor/preprocessing bugs, but LayoutLMv2Tokenizer NER/padding crashes are unrelated to OmDet-Turbo producing the wrong input resolution.", "right": "issue:44610"}, {"accept": false, "left": "issue:41720", "reason": "Qwen3 auto device mapping cudaErrorAssert and Llava Next segmentation fault are separate loading failures with no clear shared mechanism.", "right": "issue:43322"}, {"accept": false, "left": "issue:41720", "reason": "One is an A800 auto-device-map assert during loading; the other is Qwen3.5 flash-attention generation corruption. Different paths.", "right": "issue:44977"}, {"accept": false, "left": "issue:43404", "reason": "43404 is tied lm_head weight missing in Mistral3ForConditionalGeneration; 45357 is incorrect visual encoder keys on save_pretrained for Qwen3.5. 
Not the same issue.", "right": "issue:45357"}, {"accept": false, "left": "issue:43526", "reason": "BeitImageProcessorFast reduce_labels returning one label is unrelated to glm-4v-9b load failure.", "right": "issue:43881"}, {"accept": false, "left": "issue:43454", "reason": "43454 is about lm_head tying/generation quality; 43720 is BitNet packed-weight unpacking during accelerate loading. Different bugs.", "right": "issue:43720"}], "summary": "No soft-edge pair looks like the same underlying bug/change. These are mostly distinct model-specific runtime, serialization, tokenizer, and serving issues with only broad subsystem overlap."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43550|issue:43606", "issue:44466|issue:45357", "issue:43454|issue:45072", "issue:43454|issue:43550", "issue:44423|issue:45406", "issue:44186|issue:44610", "issue:41720|issue:43322", "issue:41720|issue:44977", "issue:43404|issue:45357", "issue:43526|issue:43881", "issue:43454|issue:43720"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6808, "estimated_input_tokens": 3276, "item_count": 18, "node_count": 18, "serialized_chars": 13104, "soft_pair_count": 9}, "cached_at": "2026-04-14T15:46:19Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "ec914162ad16f0734a4cf0cedaa874ca9c9ac7dbf7305081189676627a8669cd", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:34567", "issue:40990", "issue:42617", "issue:42915", "issue:43377", "issue:43577", "issue:43606", "issue:43708", "issue:43720", "issue:43873", "issue:44186", "issue:44361", "issue:44479", "issue:44756", "issue:44811", "issue:44877", "issue:44918", 
"issue:44991"], "result": {"analyst_result": {"best_issue_reason": "43873 is the best overall issue candidate because it is open, broadly scoped, and closest to a reusable umbrella for nearby offloading/quantization reports.", "best_pr_reason": null, "canonical_issue_reason": "Issue 43873 is the broadest and most generally phrased report in the set, so it is the least-bad anchor for this heterogeneous cluster.", "canonical_pr_reason": null, "confidence": 0.59, "soft_edge_verdicts": [{"accept": false, "left": "issue:34567", "reason": "TrainerState token counting and gpt-oss perplexity are unrelated symptoms and code paths.", "right": "issue:40990"}, {"accept": false, "left": "issue:43873", "reason": "Both mention memory/offloading, but one is quantization offload behavior and the other is mmap OOM on Strix Halo; different bugs.", "right": "issue:44756"}, {"accept": false, "left": "issue:43577", "reason": "One is dtype propagation in BLIP-2 loading, the other is resume_from_checkpoint max_steps math; no shared bug.", "right": "issue:43708"}, {"accept": false, "left": "issue:43377", "reason": "MIMI batched-vs-single output mismatch and Whisper batch_decode skip_special_tokens are different model/processing issues.", "right": "issue:44811"}, {"accept": false, "left": "issue:44186", "reason": "Both are tokenizer failures, but for different tokenizers, tasks, and failure modes.", "right": "issue:44361"}, {"accept": false, "left": "issue:44877", "reason": "Strict config loading for granite_speech and tokenizer loading for EMBEDDIA/est-roberta are separate regression reports.", "right": "issue:44991"}, {"accept": false, "left": "issue:42617", "reason": "A script runtime failure and a FineGrainedFP8Config model failure are not the same underlying bug.", "right": "issue:42915"}, {"accept": false, "left": "issue:44479", "reason": "Both involve Qwen3.5 family, but one is a video-input regression and the other is a TRL SFT embedding-unpacking failure.", "right": "issue:44918"}, 
{"accept": false, "left": "issue:43606", "reason": "Both involve loading/offload paths, but Bark CPU offload device mismatch and BitNet packed-weight unpacking are distinct bugs.", "right": "issue:43720"}], "summary": "The cluster is mostly a false-positive mix of unrelated issues. The soft pairs share broad themes like loading, offloading, tokenization, or trainer behavior, but each reports a different concrete bug path and should not be merged as duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:34567|issue:40990", "issue:43873|issue:44756", "issue:43577|issue:43708", "issue:43377|issue:44811", "issue:44186|issue:44361", "issue:44877|issue:44991", "issue:42617|issue:42915", "issue:44479|issue:44918", "issue:43606|issue:43720"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6612, "estimated_input_tokens": 3178, "item_count": 17, "node_count": 17, "serialized_chars": 12709, "soft_pair_count": 10}, "cached_at": "2026-04-14T15:47:32Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "e4de8772f23336427b3422559a31097081a90f2a99a6f364fffe172a2ac2e205", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:33357", "issue:35707", "issue:36010", "issue:41720", "issue:42617", "issue:42915", "issue:43322", "issue:43388", "issue:43540", "issue:43782", "issue:43873", "issue:44292", "issue:44479", "issue:44514", "issue:44545", "issue:45059", "issue:45161"], "result": {"analyst_result": {"best_issue_reason": "issue:44514 is the best representative for the cluster\u2019s only true duplicate pair because it is the more established, higher-signal report with the same concrete failure 
mode.", "best_pr_reason": null, "canonical_issue_reason": "issue:44514 is the stronger canonical issue: it reports the exact same bug as 44545, was opened earlier, and has much higher inbound reference/activity, making it the better duplicate target.", "canonical_pr_reason": null, "confidence": 0.93, "soft_edge_verdicts": [{"accept": false, "left": "issue:44292", "reason": "Both involve Qwen model/runtime errors, but one is an NVFP4 execution issue and the other is a video-input regression; different bugs.", "right": "issue:44479"}, {"accept": false, "left": "issue:41720", "reason": "Both mention MoE/parallel execution problems, but the failures are different: CUDA assert on A800 vs only-TP not working for GPT-OSS.", "right": "issue:45161"}, {"accept": false, "left": "issue:41720", "reason": "These are unrelated failure modes; Qwen3 auto device mapping CUDA assert is not the same as not being able to run 3d_parallel.py.", "right": "issue:42617"}, {"accept": false, "left": "issue:43388", "reason": "Different domains and symptoms: gather_for_metrics last-batch label dropping vs SAM3 PCS text+box behavior.", "right": "issue:45059"}, {"accept": false, "left": "issue:43540", "reason": "One is a video-input ValueError in Qwen3OmniMoe; the other is a from_pretrained weight_only=True error in Qwen3VL. 
Similar family, different bug.", "right": "issue:43782"}, {"accept": false, "left": "issue:42915", "reason": "Both are model-loading/runtime issues, but Qwen3Moe FineGrainedFP8Config failure and Llava Next segmentation fault are distinct code paths.", "right": "issue:43322"}, {"accept": false, "left": "issue:41720", "reason": "Auto device mapping CUDA assert and offloading-with-quantization misbehavior are related broadly to placement, but not the same concrete bug.", "right": "issue:43873"}, {"accept": false, "left": "issue:36010", "reason": "ImportError for GenerationMixin and inability to run 3d_parallel.py are unrelated issues.", "right": "issue:42617"}, {"accept": true, "left": "issue:44514", "reason": "Same exact title and described failure: apply_chat_template crashes on batched input when padding=False; 44545 is a near-duplicate of 44514.", "right": "issue:44545"}, {"accept": false, "left": "issue:33357", "reason": "A MacOS bus error loading a CLIP model and a generation issue with inputs_embeds/past_key_values are unrelated.", "right": "issue:35707"}], "summary": "This cluster is mostly a set of unrelated issue reports. 
The only clear duplicate pair is the two nearly identical Qwen2_5_VLProcessor.apply_chat_template crashes with padding=False; the rest are only loosely similar by subsystem or model family."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44292|issue:44479", "issue:41720|issue:45161", "issue:41720|issue:42617", "issue:43388|issue:45059", "issue:43540|issue:43782", "issue:42915|issue:43322", "issue:41720|issue:43873", "issue:36010|issue:42617", "issue:44514|issue:44545", "issue:33357|issue:35707"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6868, "estimated_input_tokens": 3306, "item_count": 18, "node_count": 18, "serialized_chars": 13224, "soft_pair_count": 9}, "cached_at": "2026-04-14T15:48:10Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "fd7112b81348c21802af92c1167ead41fcb663e09703288d74fe091d7d4b0ebf", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:30064", "issue:34634", "issue:41720", "issue:43381", "issue:43606", "issue:43653", "issue:43720", "issue:43746", "issue:43782", "issue:43844", "issue:44368", "issue:44451", "issue:44492", "issue:44509", "issue:44991", "issue:45072", "issue:45357", "issue:45381"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.98, "soft_edge_verdicts": [{"accept": false, "left": "issue:30064", "reason": "Different processors and failure modes: void segmentation-map handling vs Bark voice_preset behavior. 
Same general API area, but not the same bug.", "right": "issue:34634"}, {"accept": false, "left": "issue:41720", "reason": "Different models and code paths: Qwen3 auto device mapping cudaErrorAssert vs GraniteSpeech PEFT adapter loading from local checkpoints.", "right": "issue:43746"}, {"accept": false, "left": "issue:44492", "reason": "Both are docs-related, but one is a cache-strategy typo and the other is stale pipeline-task documentation. Not the same change or defect.", "right": "issue:44509"}, {"accept": false, "left": "issue:43653", "reason": "Tokenizer special-token registration vs BitNet packed-weight unpacking during accelerate loading are unrelated implementation bugs.", "right": "issue:43720"}, {"accept": false, "left": "issue:44368", "reason": "A LoRA config warning about tie_word_embeddings is distinct from saving incorrect visual-encoder keys in Qwen3.5.", "right": "issue:45357"}, {"accept": false, "left": "issue:43381", "reason": "Gradient checkpointing being disallowed in eval mode is not the same as gradient abnormally increasing under DeepSpeed ZeRO-3.", "right": "issue:43844"}, {"accept": false, "left": "issue:43606", "reason": "Both involve inference mismatches, but one is CPU offload device placement and the other is bf16 dtype mismatch in different model wrappers.", "right": "issue:45072"}, {"accept": false, "left": "issue:43782", "reason": "from_pretrained weight_only loading failure and wrong vision_position_ids for video input are different Qwen-VL issues.", "right": "issue:45381"}, {"accept": false, "left": "issue:44451", "reason": "Both are model-loading failures, but for different models and different breakpoints: ScandiBERT load failure vs est-roberta tokenizer loading on v5.", "right": "issue:44991"}], "summary": "This cluster is heterogeneous: it mixes unrelated bugs in image processing, Bark/Qwen/Granite/BigBird/BitNet loading, training-time behavior, dtype/device mismatches, and documentation issues. 
None of the soft pairs looks like the same underlying defect."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:30064|issue:34634", "issue:41720|issue:43746", "issue:44492|issue:44509", "issue:43653|issue:43720", "issue:44368|issue:45357", "issue:43381|issue:43844", "issue:43606|issue:45072", "issue:43782|issue:45381", "issue:44451|issue:44991"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6940, "estimated_input_tokens": 3342, "item_count": 18, "node_count": 18, "serialized_chars": 13365, "soft_pair_count": 10}, "cached_at": "2026-04-14T15:48:51Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "3a27e409082eb4b64b48b88024f695c0ae093e843d1f4b5e85812feb16ff9bc8", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:34567", "issue:34634", "issue:35707", "issue:41720", "issue:42886", "issue:43421", "issue:43550", "issue:43720", "issue:43819", "issue:43927", "issue:43994", "issue:44062", "issue:44361", "issue:44451", "issue:44671", "issue:44843", "issue:44918", "issue:44936"], "result": {"analyst_result": {"best_issue_reason": "44843 is the strongest representative issue because it is concrete, actionable, and root-cause oriented; 42886 reads as the broader symptom report for the same offline tokenizer-loading bug.", "best_pr_reason": null, "canonical_issue_reason": "Issue 44843 is the most precise root-cause report in the cluster: it names the offending `_patch_mistral_regex` / `model_info()` call and the HF_HUB_OFFLINE failure mode, making it the best anchor for the one likely duplicate pair.", "canonical_pr_reason": null, "confidence": 0.79, "soft_edge_verdicts": [{"accept": 
false, "left": "issue:44918", "reason": "Different failures: one is Qwen3.5 embedding unpacking with TRL SFT trainer, the other is a generic `trainer.evaluate()` after `trainer.train()` problem. No clear shared code path.", "right": "issue:44936"}, {"accept": false, "left": "issue:44062", "reason": "Both involve tokenizer/model loading, but one is an `AddedToken(..., special=...)` TypeError and the other is a model loading failure for `vesteinn/ScandiBERT`; not the same bug.", "right": "issue:44451"}, {"accept": false, "left": "issue:43550", "reason": "Unrelated subsystems and symptoms: torch.compile/SDPA for Bamba versus `MLukeTokenizer` AttributeError on tasks.", "right": "issue:44361"}, {"accept": false, "left": "issue:43550", "reason": "Different concrete problems: compile-time SDPA failure versus config save/load losing custom token IDs and causing generation errors.", "right": "issue:43927"}, {"accept": false, "left": "issue:34634", "reason": "BarkProcessor `voice_preset` and Qwen3 auto device mapping/cuda assert are unrelated model-specific bugs.", "right": "issue:41720"}, {"accept": false, "left": "issue:43994", "reason": "Both are bad-output reports, but they affect different models and different pipelines; no evidence of the same underlying defect.", "right": "issue:44671"}, {"accept": true, "left": "issue:42886", "reason": "These point to the same offline tokenizer-loading bug: `HF_HUB_OFFLINE=1` causes a cache-load failure, and 44843 identifies the likely root cause as an unconditional `model_info()` call inside `_patch_mistral_regex`.", "right": "issue:44843"}, {"accept": false, "left": "issue:34567", "reason": "`num_input_tokens_seen` not updating in TrainerState is unrelated to progressive generation with `inputs_embeds` and `past_key_values`.", "right": "issue:35707"}, {"accept": false, "left": "issue:43720", "reason": "Different model paths and failures: BitNet packed-weight unpacking during accelerate loading versus DAC latent reconstruction 
missing STE.", "right": "issue:43819"}, {"accept": false, "left": "issue:43421", "reason": "Both touch tokenizer configuration, but one is runtime post-processor refresh after special-token edits and the other is save/load dropping custom token IDs; different bugs and fixes.", "right": "issue:43927"}], "summary": "This cluster is mostly heterogeneous issue reports; nearly all proposed soft pairs are different bugs in different models/code paths. The only plausible duplicate is the offline-tokenizer failure around `model_info()` in `_patch_mistral_regex`."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44918|issue:44936", "issue:44062|issue:44451", "issue:43550|issue:44361", "issue:43550|issue:43927", "issue:34634|issue:41720", "issue:43994|issue:44671", "issue:42886|issue:44843", "issue:34567|issue:35707", "issue:43720|issue:43819", "issue:43421|issue:43927"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7200, "estimated_input_tokens": 3472, "item_count": 18, "node_count": 18, "serialized_chars": 13887, "soft_pair_count": 11}, "cached_at": "2026-04-14T15:49:29Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "3dc9d95e2fd3aa15b4f69b3c48ea5ec1b2bfec3b116f8d3cf27fea2e877f9e58", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:39692", "issue:43421", "issue:43525", "issue:43531", "issue:43596", "issue:43653", "issue:44077", "issue:44186", "issue:44220", "issue:44589", "issue:44610", "issue:44625", "issue:44898", "issue:44987", "issue:45042", "issue:45072", "issue:45081", "issue:45356"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": null, 
"canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "issue:43531", "reason": "Different failures and code paths: Qwen3-MoE sliding-window behavior vs a Float8 storage lookup error.", "right": "issue:44589"}, {"accept": false, "left": "issue:44987", "reason": "Both are regressions, but one is model loading and the other is PIL image-processor dependency handling; not the same bug.", "right": "issue:45042"}, {"accept": false, "left": "issue:44077", "reason": "Unrelated model/config issues: patchtsmixer post_init policy vs Qwen3.5 num_labels propagation.", "right": "issue:44625"}, {"accept": false, "left": "issue:43421", "reason": "Both concern tokenizers, but one is runtime special-token/post-processor refresh and the other is BigBird mask-token registration causing empty decode; not the same concrete defect.", "right": "issue:43653"}, {"accept": false, "left": "issue:43421", "reason": "Different tokenizer subsystems and symptoms: post-processor refresh bug vs LayoutLMv2 NER/padding crash.", "right": "issue:44186"}, {"accept": false, "left": "issue:44186", "reason": "LayoutLMv2 tokenizer padding/NER crash and Perceiver image-size failure are unrelated.", "right": "issue:44898"}, {"accept": false, "left": "issue:45081", "reason": "Both mention fix_mistral_regex, but one is a hard crash in _patch_mistral_regex while the other is Kimi-K2.5 codec/warning regression; not clearly the same bug.", "right": "issue:45356"}, {"accept": false, "left": "issue:39692", "reason": "SigLIP2 docs example errors are unrelated to OmDet-Turbo processor/model input-size mismatch.", "right": "issue:44610"}, {"accept": false, "left": "issue:44610", "reason": "Different inference problems: image-size mismatch in OmDet-Turbo vs dtype mismatch in SwitchTransformers/TimmWrapperModel.", "right": "issue:45072"}, {"accept": false, "left": "issue:43596", "reason": "BertModel zero3 init IndexError and 
_torch_extract_fbank_features are unrelated code paths.", "right": "issue:44220"}, {"accept": false, "left": "issue:43525", "reason": "Llama4Config missing pad_token_id is unrelated to Qwen3-MoE sliding_window behavior.", "right": "issue:43531"}], "summary": "These issues are mostly unrelated regressions across tokenizers, configs, vision processors, and model-loading paths. None of the soft pairs look like the same underlying bug/change closely enough to merge as duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43531|issue:44589", "issue:44987|issue:45042", "issue:44077|issue:44625", "issue:43421|issue:43653", "issue:43421|issue:44186", "issue:44186|issue:44898", "issue:45081|issue:45356", "issue:39692|issue:44610", "issue:44610|issue:45072", "issue:43596|issue:44220", "issue:43525|issue:43531"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6774, "estimated_input_tokens": 3259, "item_count": 17, "node_count": 17, "serialized_chars": 13036, "soft_pair_count": 10}, "cached_at": "2026-04-14T15:50:02Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "5293658bf4feca9d113329412a43433ffa7eae49c0fb91cc8341b63990f2117a", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:39692", "issue:40444", "issue:43540", "issue:43550", "issue:43653", "issue:43716", "issue:43746", "issue:43819", "issue:43873", "issue:44008", "issue:44186", "issue:44220", "issue:44442", "issue:44661", "issue:44843", "issue:45081", "issue:45381"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 
0.94, "soft_edge_verdicts": [{"accept": false, "left": "issue:43746", "reason": "Different subsystems and failure modes: PEFT/local checkpoint loading for GraniteSpeech vs Qwen2.5-VL video position IDs.", "right": "issue:45381"}, {"accept": false, "left": "issue:43540", "reason": "Both are multimodal, but one is a Qwen video dataset batching failure and the other is a Gemma audio field name collision in forward(); not the same bug.", "right": "issue:44008"}, {"accept": false, "left": "issue:43653", "reason": "BigBirdTokenizer special-token registration and DAC latent reconstruction mismatch are unrelated code paths.", "right": "issue:43819"}, {"accept": false, "left": "issue:44220", "reason": "Audio feature extraction failure vs tokenizer mapping/new-model registration logic; no common underlying defect.", "right": "issue:44661"}, {"accept": false, "left": "issue:44843", "reason": "Both mention _patch_mistral_regex, but one is offline-model_info access and the other is a backend_tokenizer attribute crash; distinct bugs.", "right": "issue:45081"}, {"accept": false, "left": "issue:43716", "reason": "Mistral image dtype mismatch and Gemma3n audio tensor name collision are different model-specific issues.", "right": "issue:44008"}, {"accept": false, "left": "issue:43550", "reason": "torch.compile+SDPA failure in Bamba and DAC.from_latents/forward mismatch are unrelated.", "right": "issue:43819"}, {"accept": false, "left": "issue:39692", "reason": "SigLIP2 doc example errors are about model/processor mismatch and quantization, while the other is a broader offloading-with-quantization report.", "right": "issue:43873"}, {"accept": false, "left": "issue:40444", "reason": "Qwen2.5-VL iterable dataset multi-image failure has nothing in common with GraniteSpeech local checkpoint adapter loading.", "right": "issue:43746"}, {"accept": false, "left": "issue:44186", "reason": "LayoutLMv2Tokenizer NER/padding crash and AutoTokenizer failing to load FastSpeech2ConformerTokenizer 
are separate tokenizer bugs.", "right": "issue:44442"}], "summary": "This cluster is a set of mostly unrelated issue reports spanning tokenizers, multimodal models, quantization/offloading, and loading regressions. The soft-similarity links are superficial; none look like the same underlying bug or change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43746|issue:45381", "issue:43540|issue:44008", "issue:43653|issue:43819", "issue:44220|issue:44661", "issue:44843|issue:45081", "issue:43716|issue:44008", "issue:43550|issue:43819", "issue:39692|issue:43873", "issue:40444|issue:43746", "issue:44186|issue:44442"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6556, "estimated_input_tokens": 3150, "item_count": 17, "node_count": 17, "serialized_chars": 12599, "soft_pair_count": 9}, "cached_at": "2026-04-14T15:50:37Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "69bf7badc39abc3d366af744cae1cd11a74fb1bdfe927e7fe029c7b480573bc5", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:30064", "issue:36010", "issue:43335", "issue:43421", "issue:43540", "issue:43550", "issue:43618", "issue:43697", "issue:43720", "issue:43746", "issue:43756", "issue:43873", "issue:44062", "issue:44488", "issue:44811", "issue:45072", "issue:45381"], "result": {"analyst_result": {"best_issue_reason": "issue:43873 is the broadest, most generally phrased open bug here, but it is not a true canonical duplicate target for the rest of the set.", "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": 
"issue:43618", "reason": "Different bugs: CLIPOutput attentions field regression vs RTDetrV2 output changes in v5; unrelated code paths and symptoms.", "right": "issue:43697"}, {"accept": false, "left": "issue:43746", "reason": "Different load-path problems: PEFT adapter checkpoint loading vs quantization/offloading behavior; not the same underlying defect.", "right": "issue:43873"}, {"accept": false, "left": "issue:43335", "reason": "Both are transformer model config issues, but one is sparse-layer creation in SwitchTransformers and the other is RoPE layer dropping in Smollm3; distinct bugs.", "right": "issue:43756"}, {"accept": false, "left": "issue:44062", "reason": "Tokenizer AddedToken constructor error is different from a specific model failing to load; no shared failure mechanism.", "right": "issue:44488"}, {"accept": false, "left": "issue:43720", "reason": "One is BitNet packed-weight loading under accelerate, the other is dtype mismatches in SwitchTransformers/TimmWrapperModel bfloat16 inference; different regressions.", "right": "issue:45072"}, {"accept": false, "left": "issue:43550", "reason": "Both mention inference/runtime issues, but torch.compile+SDPA in Bamba is unrelated to bfloat16 dtype mismatches in other models.", "right": "issue:45072"}, {"accept": false, "left": "issue:30064", "reason": "Completely unrelated: image segmentation-map processing vs an import error for GenerationMixin.", "right": "issue:36010"}, {"accept": false, "left": "issue:43421", "reason": "Tokenizers backend special-token post-processor updates are not the same as Whisper batch_decode ignoring skip_special_tokens.", "right": "issue:44811"}, {"accept": false, "left": "issue:43540", "reason": "Both are video-input related, but they affect different Qwen models and different errors (processing ValueError vs wrong vision_position_ids).", "right": "issue:45381"}], "summary": "No soft pair looks like a true duplicate or mergeable PR-equivalent; the set is a mix of unrelated bugs 
spanning models, tokenizers, loading, quantization, video, and import regressions."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43618|issue:43697", "issue:43746|issue:43873", "issue:43335|issue:43756", "issue:44062|issue:44488", "issue:43720|issue:45072", "issue:43550|issue:45072", "issue:30064|issue:36010", "issue:43421|issue:44811", "issue:43540|issue:45381"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7016, "estimated_input_tokens": 3380, "item_count": 18, "node_count": 18, "serialized_chars": 13518, "soft_pair_count": 10}, "cached_at": "2026-04-14T15:51:11Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "c83466a9c0be40809ad2db15e4a1f64943947ae22c1b4d4b5025f3dc729d5450", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:43531", "issue:43577", "issue:43582", "issue:43643", "issue:43644", "issue:43749", "issue:43782", "issue:43818", "issue:43825", "issue:43994", "issue:44448", "issue:44561", "issue:44671", "issue:44877", "issue:44977", "issue:45020", "issue:45081", "issue:45406"], "result": {"analyst_result": {"best_issue_reason": "issue:45020 is the best global issue representative because it captures the general regression theme most directly and is broad enough to anchor the cluster, even though the rest do not appear to be duplicates.", "best_pr_reason": null, "canonical_issue_reason": "issue:45020 is the broadest and most representative issue in the set, covering recent-version breakages for remote_code models; the other issues are narrower, model-specific, or clearly unrelated.", "canonical_pr_reason": null, "confidence": 0.74, "soft_edge_verdicts": 
[{"accept": false, "left": "issue:43577", "reason": "Different models and failures: BLIP2 dtype handling vs Video-LLaVA missing temporal attention / weight sharing.", "right": "issue:43818"}, {"accept": false, "left": "issue:44561", "reason": "Both concern remote_code regressions, but one is a specific is_torch_fx_available removal bug while the other is a broader report; not enough to treat as the same concrete issue.", "right": "issue:45020"}, {"accept": false, "left": "issue:43643", "reason": "Both involve loading/config behavior, but the concrete failures differ: missing fields from trust_remote_code vs strict config blocking granite_speech.", "right": "issue:44877"}, {"accept": false, "left": "issue:45081", "reason": "Different crash sites and components: Mistral tokenizer regex patching vs Gemma4Processor missing _tokenizer in serve.", "right": "issue:45406"}, {"accept": false, "left": "issue:44448", "reason": "Both report output changes in v5, but for different models and likely different code paths; not a clear single underlying bug.", "right": "issue:44671"}, {"accept": false, "left": "issue:43782", "reason": "Different features and symptoms: weight_only loading for Qwen3VL vs flash-attention generation issues for Qwen3.5.", "right": "issue:44977"}, {"accept": false, "left": "issue:43531", "reason": "Qwen3-MoE sliding_window behavior is unrelated to AutoConfig trust_remote_code missing fields.", "right": "issue:43643"}, {"accept": false, "left": "issue:43577", "reason": "No shared bug surface: BLIP2 dtype loading vs Apple Silicon caching_allocator_warmup TypeError.", "right": "issue:43582"}, {"accept": false, "left": "issue:43644", "reason": "Both are v5 regressions around loading, but they describe different failures and code paths; too broad to merge.", "right": "issue:43749"}, {"accept": false, "left": "issue:43825", "reason": "Pipeline translation warning text and SigLIP2 nonsensical outputs are unrelated problems.", "right": "issue:43994"}], 
"summary": "The cluster is mostly a set of distinct Transformers v5 regression reports across unrelated models and code paths. A few are thematically similar (remote_code / v5 breakages), but none are strong duplicate candidates from the available evidence."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43577|issue:43818", "issue:44561|issue:45020", "issue:43643|issue:44877", "issue:45081|issue:45406", "issue:44448|issue:44671", "issue:43782|issue:44977", "issue:43531|issue:43643", "issue:43577|issue:43582", "issue:43644|issue:43749", "issue:43825|issue:43994"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6862, "estimated_input_tokens": 3303, "item_count": 18, "node_count": 18, "serialized_chars": 13211, "soft_pair_count": 9}, "cached_at": "2026-04-14T15:51:45Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "46ae7f66db58bf36ee126c69abf04c73e030951d5bfac279d5e777cba0a0913e", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:30064", "issue:36331", "issue:41720", "issue:42175", "issue:43650", "issue:43720", "issue:43782", "issue:43819", "issue:43825", "issue:43827", "issue:43906", "issue:44361", "issue:44610", "issue:44805", "issue:44936", "issue:44991", "issue:45081", "issue:45325"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.98, "soft_edge_verdicts": [{"accept": false, "left": "issue:43819", "reason": "Different components and failures: DAC latent reconstruction vs MLukeTokenizer task attribute error.", "right": "issue:44361"}, {"accept": false, "left": 
"issue:44805", "reason": "A tensor mask shape mismatch during indexing is not the same bug as trainer.evaluate() failing after train().", "right": "issue:44936"}, {"accept": false, "left": "issue:43650", "reason": "One is a nonspecific data request; the other is an isolated reproduction of a separate issue.", "right": "issue:43906"}, {"accept": false, "left": "issue:41720", "reason": "Qwen3 CUDA assert on auto device mapping is unrelated to tokenizer loading for EMBEDDIA/est-roberta.", "right": "issue:44991"}, {"accept": false, "left": "issue:43720", "reason": "BitNet packed-weight loading during accelerate differs from OmDet-Turbo processor image-size mismatch.", "right": "issue:44610"}, {"accept": false, "left": "issue:42175", "reason": "Missing TensorFlow backend in installation extras is unrelated to a Mistral tokenizer regex crash.", "right": "issue:45081"}, {"accept": false, "left": "issue:43825", "reason": "They are related to the v5 pipeline removal topic, but one is a runtime error-message issue and the other is stale docs; not the same underlying bug.", "right": "issue:43827"}, {"accept": false, "left": "issue:30064", "reason": "Void segmentation map processing and CustomTrainer.compute_loss keyword regression are different failures.", "right": "issue:36331"}, {"accept": false, "left": "issue:43782", "reason": "Different model families and distinct VL bugs: weight_only loading error vs rope index scaling in Qwen2.5-VL.", "right": "issue:45325"}], "summary": "This cluster looks like a false positive: the issues span unrelated models, tokenizers, trainer behavior, CI/loading bugs, docs, and a generic data report. 
None of the soft pairs appear to describe the same underlying bug or change, so no duplicate merge is justified."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43819|issue:44361", "issue:44805|issue:44936", "issue:43650|issue:43906", "issue:41720|issue:44991", "issue:43720|issue:44610", "issue:42175|issue:45081", "issue:43825|issue:43827", "issue:30064|issue:36331", "issue:43782|issue:45325"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6616, "estimated_input_tokens": 3180, "item_count": 17, "node_count": 17, "serialized_chars": 12717, "soft_pair_count": 9}, "cached_at": "2026-04-14T15:52:10Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "37538c35c1527c54f6bcd50935767a94a807afa55bac136dd471be32d06e3aa1", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:34634", "issue:36010", "issue:43262", "issue:43526", "issue:43582", "issue:43653", "issue:43697", "issue:43749", "issue:43819", "issue:44060", "issue:44291", "issue:44368", "issue:44485", "issue:44509", "issue:44554", "issue:44671", "issue:44857"], "result": {"analyst_result": {"best_issue_reason": "Issue 43262 is the best standalone issue in the set because it is well-scoped, reproducible, and likely to have the clearest fix path and highest reuse as a reference point.", "best_pr_reason": null, "canonical_issue_reason": "Issue 43262 is the strongest representative by specificity and impact: it has concrete reproduction around audio processor chat templating, clear expected-vs-actual behavior, and the most inbound references among the set.", "canonical_pr_reason": null, "confidence": 0.89, "soft_edge_verdicts": 
[{"accept": false, "left": "issue:44485", "reason": "Both touch docs/behavior around model/task support, but one is about GLM-5 RoPE implementation and the other about removed pipeline task docs; these are different changes and not the same bug.", "right": "issue:44509"}, {"accept": false, "left": "issue:34634", "reason": "BarkProcessor voice preset failure and GenerationMixin import error are unrelated subsystems and failure modes.", "right": "issue:36010"}, {"accept": false, "left": "issue:44060", "reason": "Both mention tied-weights warnings in Qwen3/Qwen3.5 contexts, but one is a concrete incorrect tying bug across layers while the other is a user-facing warning during LoRA fine-tuning; not clearly the same code-path fix.", "right": "issue:44368"}, {"accept": false, "left": "issue:43582", "reason": "Both are runtime crashes on specific platforms/dtypes, but one is Apple Silicon allocator warmup TypeError and the other is a CUDA float16 loss crash; different components and fixes.", "right": "issue:44857"}, {"accept": false, "left": "issue:43749", "reason": "Both concern loading/initialization paths, but FSDP CPU RAM efficient loading and init_empty_weights/_is_hf_initialized TypeError are distinct mechanisms and not one concrete bug.", "right": "issue:44291"}, {"accept": false, "left": "issue:43526", "reason": "BeitImageProcessorFast label reduction and RTDetrV2 output differences are separate model/processor correctness issues with no shared code path evident.", "right": "issue:43697"}, {"accept": false, "left": "issue:43653", "reason": "BigBirdTokenizer special-token registration and CamemBERT masked-LM prediction errors are different tokenizer/model bugs; no strong evidence of the same underlying defect.", "right": "issue:44671"}, {"accept": false, "left": "issue:43526", "reason": "A Beit image-processor label bug and an MPS attention correctness issue are unrelated; the overlap is only that both are model-side correctness reports.", "right": "issue:44554"}, 
{"accept": false, "left": "issue:43262", "reason": "Audio processor apply_chat_template sampling-rate defaulting and DAC.from_latents vs forward mismatch are different audio/model pathways and do not look mergeable into one fix.", "right": "issue:43819"}], "summary": "The items are a loose thematic cluster of unrelated Transformers issues, not true duplicates. Most pairs only share a subsystem or vague bug shape, so all soft edges should be rejected."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44485|issue:44509", "issue:34634|issue:36010", "issue:44060|issue:44368", "issue:43582|issue:44857", "issue:43749|issue:44291", "issue:43526|issue:43697", "issue:43653|issue:44671", "issue:43526|issue:44554", "issue:43262|issue:43819"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6892, "estimated_input_tokens": 3318, "item_count": 18, "node_count": 18, "serialized_chars": 13271, "soft_pair_count": 9}, "cached_at": "2026-04-14T15:52:38Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "93546c78a256038d4a83432e70d98e7413ef149dcd9c22939d4430285e787e3b", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:34634", "issue:36331", "issue:43257", "issue:43329", "issue:43452", "issue:43454", "issue:43577", "issue:43638", "issue:43653", "issue:43746", "issue:43854", "issue:44589", "issue:44811", "issue:44912", "issue:44936", "issue:44977", "issue:44991", "issue:45042"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "issue:43452", 
"reason": "Both mention tokenizer/model loading, but the failures are different: gguf_file handling vs a specific EMBEDDIA/est-roberta tokenizer regression.", "right": "issue:44991"}, {"accept": false, "left": "issue:43638", "reason": "One is a DeepSpeed ZeRO-3 IndexError with a non-pretrained BERT model; the other is a trainer.evaluate() failure after train() with no shared concrete code path.", "right": "issue:44936"}, {"accept": false, "left": "issue:43454", "reason": "AyaVision lm_head weight tying and BLIP2 dtype propagation are separate model-specific bugs.", "right": "issue:43577"}, {"accept": false, "left": "issue:43746", "reason": "PEFT local checkpoint loading for GraniteSpeech is unrelated to Whisper batch_decode skip_special_tokens behavior.", "right": "issue:44811"}, {"accept": false, "left": "issue:34634", "reason": "BarkProcessor voice_preset and CustomTrainer.compute_loss() kwargs are unrelated subsystems with no common underlying bug.", "right": "issue:36331"}, {"accept": false, "left": "issue:43653", "reason": "BigBirdTokenizer special-token registration and MXFP4 quantization fallback are different problems in different components.", "right": "issue:44912"}, {"accept": false, "left": "issue:44589", "reason": "Float8 storage lookup failure and PIL backend requiring torchvision are unrelated environment/backend issues.", "right": "issue:45042"}, {"accept": false, "left": "issue:43854", "reason": "GLM-4.7-Flash test loading failure and Qwen3.5 flash-attention generation issues are distinct model-specific bugs.", "right": "issue:44977"}, {"accept": false, "left": "issue:43257", "reason": "Qwen3 MOE weight conversion with accelerate+deepspeed is unrelated to multimodal token counting in the video branch.", "right": "issue:43329"}], "summary": "These issues are heterogeneous and do not look like duplicates of one another; they cover unrelated bugs in tokenizers, model loading, training, quantization, multimodal processing, and image processors. 
No PRs are present."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43452|issue:44991", "issue:43638|issue:44936", "issue:43454|issue:43577", "issue:43746|issue:44811", "issue:34634|issue:36331", "issue:43653|issue:44912", "issue:44589|issue:45042", "issue:43854|issue:44977", "issue:43257|issue:43329"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6826, "estimated_input_tokens": 3285, "item_count": 18, "node_count": 18, "serialized_chars": 13137, "soft_pair_count": 9}, "cached_at": "2026-04-14T15:53:15Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "329a681f12ce153a68d33f7b2761e1042afcf0da108a2d277d99316259ca5cf2", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:36010", "issue:38175", "issue:39692", "issue:42175", "issue:43257", "issue:43262", "issue:43531", "issue:43645", "issue:43696", "issue:43749", "issue:44568", "issue:44610", "issue:44756", "issue:44811", "issue:44912", "issue:44938", "issue:45042", "issue:45356"], "result": {"analyst_result": {"best_issue_reason": "If one issue must represent the set, #45356 is the clearest standalone regression report, but it is not a valid umbrella for the rest.", "best_pr_reason": null, "canonical_issue_reason": "No canonical issue should be chosen; the items do not share a single underlying bug or change, so collapsing them would be incorrect.", "canonical_pr_reason": null, "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "issue:44568", "reason": "Different tokenizer regressions: one is BOS/EOS handling for mdeberta-v3-base in v5, the other is a Kimi-K2.5 codec/fix_mistral_regex regression in 5.4.0.", 
"right": "issue:45356"}, {"accept": false, "left": "issue:38175", "reason": "Unrelated models and symptoms: siglip2 zero probabilities vs OmDet-Turbo processor/model input-size mismatch.", "right": "issue:44610"}, {"accept": false, "left": "issue:36010", "reason": "Different failure modes: import path breakage for GenerationMixin vs Jupyter custom-model initialization in Transformers 5.0.0.", "right": "issue:43645"}, {"accept": false, "left": "issue:43257", "reason": "One is a Qwen3 MoE loading/conversion issue with accelerate+deepspeed; the other is a GPU OOM report for GPT-oss-20b.", "right": "issue:43696"}, {"accept": false, "left": "issue:39692", "reason": "SigLIP2 docs/model-processor and quantization problems are not the same as a git-oss-20b MXFP4 fallback bug.", "right": "issue:44912"}, {"accept": false, "left": "issue:42175", "reason": "Backend packaging problem for tensorflow in '.[torch]' is unrelated to Python 3.14 import/load failures.", "right": "issue:44938"}, {"accept": false, "left": "issue:43262", "reason": "Audio chat-template sampling-rate defaulting and Whisper batch_decode skip_special_tokens are separate processor bugs.", "right": "issue:44811"}, {"accept": false, "left": "issue:43749", "reason": "Both mention memory/loading, but one is FSDP CPU RAM efficient loading and the other is mmap on Strix Halo; different code paths and fixes.", "right": "issue:44756"}, {"accept": false, "left": "issue:43531", "reason": "Qwen3-MoE sliding_window behavior and PIL backend torchvision dependency are unrelated subsystems.", "right": "issue:45042"}], "summary": "No duplicate cluster here: the issues span unrelated bugs and regressions across tokenizers, model loading, processors, quantization, memory, and platform-specific behavior."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44568|issue:45356", "issue:38175|issue:44610", "issue:36010|issue:43645", 
"issue:43257|issue:43696", "issue:39692|issue:44912", "issue:42175|issue:44938", "issue:43262|issue:44811", "issue:43749|issue:44756", "issue:43531|issue:45042"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6586, "estimated_input_tokens": 3165, "item_count": 17, "node_count": 17, "serialized_chars": 12659, "soft_pair_count": 9}, "cached_at": "2026-04-14T15:54:25Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "e7f1c2869f08a96d18c136dd8acfd8b9a53e627a83cad12288cc71fbeca97834", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:36010", "issue:36331", "issue:41720", "issue:43257", "issue:43329", "issue:43452", "issue:43540", "issue:43550", "issue:43582", "issue:43697", "issue:43756", "issue:43931", "issue:44361", "issue:44442", "issue:44479", "issue:44912", "issue:45357"], "result": {"analyst_result": {"best_issue_reason": "If forced to pick one representative issue, issue 44479 is the broadest and most central regression report in the Qwen video-input area, but it still does not represent the rest of the cluster well.", "best_pr_reason": null, "canonical_issue_reason": "No single canonical issue fits the cluster because the items cover unrelated failures across tokenizers, training APIs, model loading, multimodal/video paths, quantization, and backend/runtime bugs.", "canonical_pr_reason": null, "confidence": 0.94, "soft_edge_verdicts": [{"accept": false, "left": "issue:43329", "reason": "Both concern Qwen video/multimodal processing, but 43329 is an undefined-name bug in multimodal token counting, while 44479 is a broader video-input regression across several Qwen models; different root causes.", "right": "issue:44479"}, {"accept": false, "left": "issue:43257", 
"reason": "Both involve model-specific loading/configuration problems, but one is a Qwen3 MoE accelerate/deepspeed conversion issue and the other is a Smollm3 RoPE-layer mismatch; not the same bug.", "right": "issue:43756"}, {"accept": false, "left": "issue:43550", "reason": "Different models and different failure modes: torch.compile/SDPA on Bamba-9B-v2 versus MXFP4 quantization fallback on git-oss-20b.", "right": "issue:44912"}, {"accept": false, "left": "issue:36010", "reason": "An import-path break for GenerationMixin is unrelated to the CustomTrainer compute_loss signature change.", "right": "issue:36331"}, {"accept": false, "left": "issue:43697", "reason": "Both are model regressions, but RTDetrV2 output changes in v5 and Qwen3.5 save_pretrained writing wrong visual encoder keys are separate code paths and fixes.", "right": "issue:45357"}, {"accept": false, "left": "issue:43257", "reason": "Both are Qwen3 loading issues, but one is about missing conversion under accelerate+deepspeed and the other is a weight-shape mismatch for Qwen3-VL-30B-A3B-Instruct; distinct underlying problems.", "right": "issue:43931"}, {"accept": false, "left": "issue:41720", "reason": "These are different Qwen3-family failures: one is an auto device-mapping cuda assert on A800, the other is a video-input ValueError in Qwen3OmniMoe.", "right": "issue:43540"}, {"accept": false, "left": "issue:44361", "reason": "Both are tokenizer-loading failures, but they affect different tokenizers and error sites; no evidence they stem from the same defect.", "right": "issue:44442"}, {"accept": false, "left": "issue:43452", "reason": "A gguf_file loading regression and an Apple Silicon caching_allocator_warmup TypeError are unrelated.", "right": "issue:43582"}], "summary": "This cluster is heterogeneous: the soft-similar pairs share broad keywords or subsystems, but none appear to be the same underlying bug or change. 
No duplicate pair should be merged."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43329|issue:44479", "issue:43257|issue:43756", "issue:43550|issue:44912", "issue:36010|issue:36331", "issue:43697|issue:45357", "issue:43257|issue:43931", "issue:41720|issue:43540", "issue:44361|issue:44442", "issue:43452|issue:43582"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7170, "estimated_input_tokens": 3457, "item_count": 18, "node_count": 18, "serialized_chars": 13826, "soft_pair_count": 11}, "cached_at": "2026-04-14T15:55:28Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "ccb60f4041bb855b68f9549763004ef5fc9b6b8ce48de8f64ee1842c1c4b73bf", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:42175", "issue:42915", "issue:43329", "issue:43450", "issue:43540", "issue:43577", "issue:43824", "issue:43906", "issue:44062", "issue:44291", "issue:44479", "issue:44488", "issue:44560", "issue:44821", "issue:44991", "issue:45042", "issue:45072", "issue:45381"], "result": {"analyst_result": {"best_issue_reason": "44479 is the strongest global anchor because it captures the shared video-input regression at the broadest scope; the other issues are either unrelated or too model-specific to serve as a canonical representative.", "best_pr_reason": null, "canonical_issue_reason": "Issue 44479 is the best canonical issue: it is the broader, earlier report of the Qwen VL video-input regression, and 45381 reads like a narrower symptom of the same code path (vision_position_ids/video handling).", "canonical_pr_reason": null, "confidence": 0.86, "soft_edge_verdicts": [{"accept": false, "left": 
"issue:44062", "reason": "Both are tokenizer-loading failures, but the root causes differ: AddedToken kwarg duplication vs a specific model tokenizer regression.", "right": "issue:44991"}, {"accept": true, "left": "issue:44479", "reason": "Both describe the same Qwen VL video-input regression in v5.3.0; 45381 looks like a narrower symptom of the same video-position-id/code-path bug.", "right": "issue:45381"}, {"accept": false, "left": "issue:43329", "reason": "Video-related, but one is an undefined-variable bug in token counting and the other is a batched-shape bug in video processors.", "right": "issue:43450"}, {"accept": false, "left": "issue:42175", "reason": "Unrelated problems: pip extra/backend packaging vs an import failure for a Qwen2.5-VL class.", "right": "issue:43824"}, {"accept": false, "left": "issue:43540", "reason": "Both involve video inputs, but they hit different Qwen model families and different failure modes, so they do not look like one bug.", "right": "issue:44560"}, {"accept": false, "left": "issue:44291", "reason": "Different code paths entirely: init_empty_weights/_is_hf_initialized handling vs PIL image-processor torchvision dependency.", "right": "issue:45042"}, {"accept": false, "left": "issue:43577", "reason": "Both are dtype mismatches, but in different models and loading/inference paths; not the same concrete defect.", "right": "issue:45072"}, {"accept": false, "left": "issue:42175", "reason": "Completely different symptoms: TensorFlow backend packaging vs failure to load a specific tokenizer/model.", "right": "issue:44488"}, {"accept": false, "left": "issue:43906", "reason": "One is a reproduction of another unrelated issue, while the other is a Qwen2.5-VL video-input regression.", "right": "issue:45381"}, {"accept": false, "left": "issue:42915", "reason": "No shared bug: FP8 configuration failure for Qwen3Moe vs URL-based AutoImageProcessor loading.", "right": "issue:44821"}, {"accept": false, "left": "issue:43540", "reason": "Both 
are video-input issues, but 43540 is Qwen3OmniMoe-specific and 44479 is a broader Qwen VL regression; too different to merge as one bug.", "right": "issue:44479"}], "summary": "This cluster is mostly heterogeneous. Only the two Qwen VL video-input reports (44479 and 45381) look like the same underlying regression; the rest are distinct bugs across tokenizers, dtype handling, image processors, installs, and unrelated model families."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44062|issue:44991", "issue:44479|issue:45381", "issue:43329|issue:43450", "issue:42175|issue:43824", "issue:43540|issue:44560", "issue:44291|issue:45042", "issue:43577|issue:45072", "issue:42175|issue:44488", "issue:43906|issue:45381", "issue:42915|issue:44821", "issue:43540|issue:44479"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6824, "estimated_input_tokens": 3284, "item_count": 18, "node_count": 18, "serialized_chars": 13136, "soft_pair_count": 9}, "cached_at": "2026-04-14T15:56:15Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "1815adacef4cdc3416f5419018aaa3d1cee774caf54540825421de68c8f521b7", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:33357", "issue:34567", "issue:36010", "issue:42175", "issue:42915", "issue:43582", "issue:43643", "issue:43720", "issue:43742", "issue:43756", "issue:43782", "issue:44484", "issue:44704", "issue:44898", "issue:45137", "issue:45325", "issue:45375", "issue:45405"], "result": {"analyst_result": {"best_issue_reason": "Issue 43742 is the strongest representative because it describes a specific, reproducible model-loading failure with a narrow scope and 
clear actionable symptom.", "best_pr_reason": null, "canonical_issue_reason": "There is no single canonical duplicate among these issues; they span unrelated bugs. If an anchor is needed, issue 43742 is the most concrete and self-contained report.", "canonical_pr_reason": null, "confidence": 0.93, "soft_edge_verdicts": [{"accept": false, "left": "issue:42915", "reason": "Both concern model loading, but one is a Qwen3Moe FP8 config failure and the other is a MobileLLM key error; different models and different root causes.", "right": "issue:43742"}, {"accept": false, "left": "issue:33357", "reason": "Completely different problems: a Mac bus error with a CLIP model versus TrainerState token-count bookkeeping not updating.", "right": "issue:34567"}, {"accept": false, "left": "issue:43720", "reason": "Both are model/runtime failures, but one is BitNet packed-weight unpacking during Accelerate loading and the other is Perceiver image-classification positional encoding behavior.", "right": "issue:44898"}, {"accept": false, "left": "issue:43756", "reason": "Both mention RoPE, but one is about Smollm3 dropping RoPE layers and the other is a Qwen2.5-VL still-image position-id scaling bug.", "right": "issue:45325"}, {"accept": false, "left": "issue:45137", "reason": "Unrelated: DeepSpeed ZeRO3 deque exhaustion versus a dependency/version bump for PEFT.", "right": "issue:45405"}, {"accept": false, "left": "issue:36010", "reason": "Different issue classes: GenerationMixin import failure versus missing TensorFlow backend in a torch-only install.", "right": "issue:42175"}, {"accept": false, "left": "issue:44484", "reason": "Both involve pre-trained model APIs, but one is about save_pretrained shard sizing and the other about AutoProcessor not forwarding kwargs to cached_file.", "right": "issue:44704"}, {"accept": false, "left": "issue:43582", "reason": "Different failures on different code paths: Apple Silicon allocator warmup TypeError versus Qwen3VL weight_only loading error.", 
"right": "issue:43782"}, {"accept": false, "left": "issue:43643", "reason": "Both mention missing config fields, but one is a generic trust_remote_code AutoConfig issue and the other is a specific Qwen3_5MoeVisionConfig field dropped by @strict; not the same bug.", "right": "issue:45375"}], "summary": "No soft pair looks like a true duplicate. Most matches are only loosely similar by subsystem (loading/config/training/vision), but the concrete bugs, affected APIs, and failure modes differ."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:42915|issue:43742", "issue:33357|issue:34567", "issue:43720|issue:44898", "issue:43756|issue:45325", "issue:45137|issue:45405", "issue:36010|issue:42175", "issue:44484|issue:44704", "issue:43582|issue:43782", "issue:43643|issue:45375"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7026, "estimated_input_tokens": 3385, "item_count": 18, "node_count": 18, "serialized_chars": 13539, "soft_pair_count": 10}, "cached_at": "2026-04-14T15:56:54Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "d299c0aa38aa9bf9b8164a75ae3c4357f5cb2722bc3bd144948b701720187205", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:28282", "issue:29127", "issue:34567", "issue:34689", "issue:36331", "issue:38175", "issue:43452", "issue:43646", "issue:43653", "issue:43742", "issue:43749", "issue:44062", "issue:44291", "issue:44361", "issue:44488", "issue:44821", "issue:44898", "issue:45081"], "result": {"analyst_result": {"best_issue_reason": "Issue 43646 is the best single issue to anchor the cluster because it captures a broad framework-level regression and can absorb 
related follow-up reports better than the narrower tokenizer, model-loading, or trainer-specific bugs.", "best_pr_reason": null, "canonical_issue_reason": "Issue 43646 is the broadest and most umbrella-like report in the set, describing a general Transformers 5.0.0 custom model initialization breakage rather than a model-specific or narrowly scoped failure.", "canonical_pr_reason": null, "confidence": 0.38, "soft_edge_verdicts": [{"accept": false, "left": "issue:38175", "reason": "Both are vision/model inference problems, but one is about unexpected zero probabilities in SigLIP2 while the other is a Perceiver resize/interpolation failure; different models and different failure modes.", "right": "issue:44898"}, {"accept": false, "left": "issue:44062", "reason": "Both involve tokenizer-related errors, but the root causes differ: one is a duplicate special-token keyword conflict in AddedToken handling, the other is a Mistral regex patch accessing a missing backend_tokenizer attribute.", "right": "issue:45081"}, {"accept": false, "left": "issue:43646", "reason": "Both mention initialization/loading regressions, but 43646 is about custom model init under Transformers 5.0.0 while 43749 is specifically broken FSDP CPU RAM efficient loading; not the same bug.", "right": "issue:43749"}, {"accept": false, "left": "issue:43742", "reason": "Both are loading-time TypeErrors, but 43742 concerns loading a specific model key error and 44291 is an init_empty_weights/_is_hf_initialized argument mismatch; different code paths.", "right": "issue:44291"}, {"accept": false, "left": "issue:28282", "reason": "These are both model-loading complaints, but 28282 is a missing PyTorch dependency ImportError and 34689 is a version-specific Llama 3.2 Vision loading breakage; unrelated causes.", "right": "issue:34689"}, {"accept": false, "left": "issue:43452", "reason": "Both are tokenizer/model loading failures, but 43452 is a gguf_file regression in AutoTokenizer/AutoModelForCausalLM and 44488 
is a specific model repository failing to load; not the same underlying defect.", "right": "issue:44488"}, {"accept": false, "left": "issue:44291", "reason": "These touch loading helpers, but 44291 is an init_empty_weights constructor-argument issue while 44821 is an AutoImageProcessor URL loading problem; different subsystem and symptom.", "right": "issue:44821"}, {"accept": false, "left": "issue:43653", "reason": "Both are tokenizer errors, but 43653 is a BigBird mask-token special-token registration bug and 44361 is an MLukeTokenizer AttributeError on tasks; distinct tokenizer implementations and failures.", "right": "issue:44361"}, {"accept": false, "left": "issue:28282", "reason": "The first is a missing PyTorch dependency ImportError for AutoModel, while the second is a layoutlmv3 error-message clarity issue when boxes lack information; not duplicate bugs.", "right": "issue:29127"}, {"accept": false, "left": "issue:34567", "reason": "Both involve Trainer behavior, but 34567 is about num_input_tokens_seen not updating and 36331 is a CustomTrainer compute_loss signature break from a new num_items_in_batch argument; different fixes.", "right": "issue:36331"}], "summary": "These issues are thematically close around loading/training regressions, but the soft pairs mostly describe different failures, models, or code paths. 
None of the candidate pairs looks safe to merge as duplicates; if one issue must represent the cluster, the broadest umbrella is the Transformers 5.0.0 custom initialization regression."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:38175|issue:44898", "issue:44062|issue:45081", "issue:43646|issue:43749", "issue:43742|issue:44291", "issue:28282|issue:34689", "issue:43452|issue:44488", "issue:44291|issue:44821", "issue:43653|issue:44361", "issue:28282|issue:29127", "issue:34567|issue:36331"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6514, "estimated_input_tokens": 3129, "item_count": 17, "node_count": 17, "serialized_chars": 12514, "soft_pair_count": 9}, "cached_at": "2026-04-14T15:57:24Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "223e69cc2c51cfde04de4ea7b55d787e64baeae9cd4c96840c615841718bcf6a", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:43452", "issue:43531", "issue:43643", "issue:43653", "issue:43688", "issue:43742", "issue:43749", "issue:43927", "issue:44514", "issue:44561", "issue:44589", "issue:44792", "issue:44821", "issue:44843", "issue:44964", "issue:45290", "issue:45381"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.92, "soft_edge_verdicts": [{"accept": false, "left": "issue:43643", "reason": "Both involve `trust_remote_code`, but the root causes differ: missing fields in `AutoConfig` return value vs a v5 `is_torch_fx_available` removal breaking remote-code models.", "right": "issue:44561"}, {"accept": false, "left": "issue:43688", 
"reason": "Different bugs in different areas: auxiliary-loss normalization for MoE models vs broken FSDP CPU RAM-efficient loading.", "right": "issue:43749"}, {"accept": false, "left": "issue:44792", "reason": "Both are model-loading failures, but they concern different models and likely different failure paths; not the same concrete bug.", "right": "issue:44964"}, {"accept": false, "left": "issue:44514", "reason": "Both crash in `apply_chat_template`, but one is batched input with `padding=False` and the other is assistant tool-call messages with no content.", "right": "issue:45290"}, {"accept": false, "left": "issue:43531", "reason": "Unrelated components: Qwen3-MoE sliding-window behavior vs `AutoImageProcessor` URL loading.", "right": "issue:44821"}, {"accept": false, "left": "issue:43653", "reason": "Both touch special-token handling, but one is a tokenizer decode bug and the other is DiaConfig losing custom token IDs after save/load.", "right": "issue:43927"}, {"accept": false, "left": "issue:44514", "reason": "`apply_chat_template` crash vs Qwen2.5-VL video `vision_position_ids` being wrong are different code paths and symptoms.", "right": "issue:45381"}, {"accept": false, "left": "issue:43742", "reason": "A model load `KeyError` for MobileLLM is unrelated to the Float8 storage-object serialization `TypeError`.", "right": "issue:44589"}, {"accept": false, "left": "issue:43452", "reason": "Both involve tokenizer/model loading, but one is `gguf_file` handling and the other is an offline-mode regression from an unconditional `model_info()` call.", "right": "issue:44843"}], "summary": "These items are mostly unrelated issues that only share broad API or subsystem keywords. 
None of the soft pairs looks like the same underlying bug/change, so I reject all soft edges and do not treat this as a true duplicate cluster."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43643|issue:44561", "issue:43688|issue:43749", "issue:44792|issue:44964", "issue:44514|issue:45290", "issue:43531|issue:44821", "issue:43653|issue:43927", "issue:44514|issue:45381", "issue:43742|issue:44589", "issue:43452|issue:44843"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7184, "estimated_input_tokens": 3464, "item_count": 18, "node_count": 18, "serialized_chars": 13855, "soft_pair_count": 10}, "cached_at": "2026-04-14T15:58:00Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "d6a54491ce3188e6d478d0843111cd5363654bd2ef17165931bc644e203cb039", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:41762", "issue:43295", "issue:43452", "issue:43504", "issue:43550", "issue:43577", "issue:43606", "issue:43653", "issue:43697", "issue:43720", "issue:44568", "issue:44610", "issue:44898", "issue:44936", "issue:45042", "issue:45081", "issue:45216", "issue:45356"], "result": {"analyst_result": {"best_issue_reason": "No single issue clearly represents a duplicate cluster here; if one must be singled out, issue:45042 is a broadly scoped, user-facing regression with multiple references, but it is not a duplicate of the others.", "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "issue:45042", "reason": "Both are regressions, but one is about PIL image processors requiring torchvision and the 
other is a Mistral tokenizer regex crash. Different subsystems and failure modes.", "right": "issue:45081"}, {"accept": false, "left": "issue:43504", "reason": "Different models and loading paths: Beit legacy-field preset loading vs BitNet packed-weight handling during accelerate loading.", "right": "issue:43720"}, {"accept": false, "left": "issue:43697", "reason": "RTDetrV2 output drift in v5 and Qwen3.5 save_pretrained checkpoint corruption are unrelated bugs.", "right": "issue:45216"}, {"accept": false, "left": "issue:43452", "reason": "Both concern model/tokenizer loading, but one is a gguf_file regression for tokenizer/model loading and the other is a dtype propagation issue in Blip2.", "right": "issue:43577"}, {"accept": false, "left": "issue:43504", "reason": "Beit pretrained preset loading with a legacy field is unrelated to Bamba-9B-v2 torch.compile/SDPA behavior.", "right": "issue:43550"}, {"accept": false, "left": "issue:41762", "reason": "Gemma3 DeepSpeed ZeRO-3 loading crash and trainer.evaluate failing after trainer.train are different problems in different execution flows.", "right": "issue:44936"}, {"accept": false, "left": "issue:43295", "reason": "Both involve tokenizer regressions, but they affect different model families and different tokenizer behaviors (processor.tokenizer/images vs codec handling and warnings).", "right": "issue:45356"}, {"accept": false, "left": "issue:43606", "reason": "CPU offload device mismatch for bark-small is not the same bug as Perceiver non-default resolution handling with interpolate_pos_encoding.", "right": "issue:44898"}, {"accept": false, "left": "issue:43653", "reason": "Separate tokenizer special-token bugs affecting different models: BigBird mask token registration vs mdeberta add_special_tokens BOS/EOS behavior.", "right": "issue:44568"}, {"accept": false, "left": "issue:43504", "reason": "Beit legacy field loading failure and OmDet-Turbo processor/model input-size mismatch are unrelated.", "right": 
"issue:44610"}], "summary": "These are mostly unrelated issue reports across different models and code paths. All soft pairs look like false positives: they share broad themes like loading, tokenizers, or regressions, but not the same underlying bug."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:45042|issue:45081", "issue:43504|issue:43720", "issue:43697|issue:45216", "issue:43452|issue:43577", "issue:43504|issue:43550", "issue:41762|issue:44936", "issue:43295|issue:45356", "issue:43606|issue:44898", "issue:43653|issue:44568", "issue:43504|issue:44610"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7034, "estimated_input_tokens": 3389, "item_count": 18, "node_count": 18, "serialized_chars": 13555, "soft_pair_count": 10}, "cached_at": "2026-04-14T15:58:25Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "9502cb0e63c3a2ee901dd8c719103e7ea680d7a0f5da4cbcaa3d87ecf53657ea", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:40444", "issue:41720", "issue:43525", "issue:43606", "issue:43643", "issue:43720", "issue:43819", "issue:43906", "issue:43927", "issue:44464", "issue:44466", "issue:44625", "issue:44743", "issue:44811", "issue:44843", "issue:44877", "issue:44991", "issue:45005"], "result": {"analyst_result": {"best_issue_reason": "None of the issues is a strong cluster anchor; at best a few are loosely related by topic, but not by root cause.", "best_pr_reason": null, "canonical_issue_reason": "No clear canonical issue: the candidate pairs are heterogeneous and do not describe the same underlying bug.", "canonical_pr_reason": null, "confidence": 0.93, "soft_edge_verdicts": 
[{"accept": false, "left": "issue:44625", "reason": "Both involve config propagation/loading, but one is about `num_labels` not reaching text config and the other is strict config blocking `granite_speech` loading; different bugs.", "right": "issue:44877"}, {"accept": false, "left": "issue:41720", "reason": "Both mention runtime failures, but one is Qwen3 auto device mapping on A800 and the other is an isolated reproduction of a different issue; no shared root cause.", "right": "issue:43906"}, {"accept": false, "left": "issue:44464", "reason": "Both concern generation/state behavior, but chunked generation with compiled forward and recurrent-state reset with cache are distinct code paths and failures.", "right": "issue:44743"}, {"accept": false, "left": "issue:44843", "reason": "Both are tokenizer/loading-related, but one is an offline HF hub regression and the other is a tokenizer load failure for a specific model; not the same bug.", "right": "issue:44991"}, {"accept": false, "left": "issue:43720", "reason": "One is packed-weight unpacking during accelerate loading, the other is missing custom token IDs after save/load; different objects and failure modes.", "right": "issue:43927"}, {"accept": false, "left": "issue:43606", "reason": "CPU offload device mismatch in bark-small is unrelated to DiaConfig losing token IDs during serialization.", "right": "issue:43927"}, {"accept": false, "left": "issue:43525", "reason": "Both involve missing config fields, but one is `pad_token_id` on `Llama4Config` and the other is missing fields from `AutoConfig.from_pretrained` with trust_remote_code; not the same defect.", "right": "issue:43643"}, {"accept": false, "left": "issue:44466", "reason": "Both discuss tied weights/serialization, but one is inconsistent `lm_head.weight` serialization by device and the other is translation-model weight issues in v5; too broad to treat as one bug.", "right": "issue:45005"}, {"accept": false, "left": "issue:43819", "reason": "DAC 
latent/forward mismatch with missing STE is unrelated to Whisper `batch_decode()` ignoring `skip_special_tokens`.", "right": "issue:44811"}, {"accept": false, "left": "issue:40444", "reason": "Finetuning multi-image IterableDataset failure and Whisper decode parameter handling are unrelated subsystems and bugs.", "right": "issue:44811"}], "summary": "The proposed soft matches are all too loose to merge into a duplicate cluster. They mostly share broad themes like config handling, generation, or loading, but each issue points to a different code path and failure mode. No single canonical issue is a safe representative."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44625|issue:44877", "issue:41720|issue:43906", "issue:44464|issue:44743", "issue:44843|issue:44991", "issue:43720|issue:43927", "issue:43606|issue:43927", "issue:43525|issue:43643", "issue:44466|issue:45005", "issue:43819|issue:44811", "issue:40444|issue:44811"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6850, "estimated_input_tokens": 3297, "item_count": 18, "node_count": 18, "serialized_chars": 13188, "soft_pair_count": 9}, "cached_at": "2026-04-14T15:58:58Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "0282e3ff51846d7ba1e96bd0c215cae649616580dbca2c184603dab53172025b", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:24643", "issue:28282", "issue:29127", "issue:30064", "issue:33357", "issue:33666", "issue:38175", "issue:43122", "issue:43295", "issue:43577", "issue:43697", "issue:43720", "issue:43906", "issue:44361", "issue:44479", "issue:44936", "issue:44977", "issue:45356"], "result": 
{"analyst_result": {"best_issue_reason": null, "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.95, "soft_edge_verdicts": [{"accept": false, "left": "issue:43577", "reason": "Different problems: Blip2 dtype loading stays float32 vs RTDetrV2 output changes in v5. No shared code path or concrete duplicate symptom.", "right": "issue:43697"}, {"accept": false, "left": "issue:43720", "reason": "Unrelated areas and failures: BitNet packed-weight loading vs MLukeTokenizer task AttributeError. Similarity is only superficial.", "right": "issue:44361"}, {"accept": false, "left": "issue:24643", "reason": "DeepSpeed training RuntimeError about tensor weight shape is unrelated to LayoutLMv3 box validation/error-message clarity.", "right": "issue:29127"}, {"accept": false, "left": "issue:44977", "reason": "Different subsystems and symptoms: Qwen3.5 flash-attention generation bug vs Kimi-K2.5 tokenizer codec/regression warning issue.", "right": "issue:45356"}, {"accept": false, "left": "issue:38175", "reason": "SIGLIP2 zero-probabilities issue is not the same as the processor.tokenizer regression in v4.57.5.", "right": "issue:43295"}, {"accept": false, "left": "issue:43122", "reason": "The titles do not establish the same bug; one is a tokenizer behavior regression, the other is just an isolated reproduction of another issue.", "right": "issue:43906"}, {"accept": false, "left": "issue:44479", "reason": "Video-input regression for Qwen VL models and trainer.evaluate() failing after train are different code paths and failure modes.", "right": "issue:44936"}, {"accept": false, "left": "issue:30064", "reason": "Void segmentation map processing and Qwen2-VL multi-GPU training are unrelated issues.", "right": "issue:33666"}, {"accept": false, "left": "issue:28282", "reason": "PyTorch import detection failure and MacOS bus error with a CLIP model are different bugs with no concrete overlap.", "right": "issue:33357"}], "summary": "The 
items form a loose similarity set around Transformers bug reports, but each soft pair refers to a different model, component, or failure mode. I would not merge any of the suggested pairs as duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43577|issue:43697", "issue:43720|issue:44361", "issue:24643|issue:29127", "issue:44977|issue:45356", "issue:38175|issue:43295", "issue:43122|issue:43906", "issue:44479|issue:44936", "issue:30064|issue:33666", "issue:28282|issue:33357"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7074, "estimated_input_tokens": 3409, "item_count": 18, "node_count": 18, "serialized_chars": 13633, "soft_pair_count": 10}, "cached_at": "2026-04-14T15:59:49Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "c8f408dde7f2fe4c9402e8c267faa76ebf11a8491f477cedccedeefd73916852", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:28282", "issue:32090", "issue:35141", "issue:43329", "issue:43452", "issue:43504", "issue:43531", "issue:43577", "issue:43606", "issue:43653", "issue:43742", "issue:44451", "issue:44464", "issue:44554", "issue:44849", "issue:44977", "issue:45325", "issue:45357"], "result": {"analyst_result": {"best_issue_reason": "43452 is the closest thing to a cluster representative because several other items are also user-visible model-loading regressions, even though the set is too mixed to be a true duplicate cluster.", "best_pr_reason": null, "canonical_issue_reason": "Issue 43452 is the broadest representative of the loading/pretrained-failure subgroup: it names explicit from_pretrained APIs and a concrete regression surface, making it the 
best single anchor among otherwise heterogeneous reports.", "canonical_pr_reason": null, "confidence": 0.34, "soft_edge_verdicts": [{"accept": false, "left": "issue:28282", "reason": "Different failure modes and code paths: missing PyTorch import for AutoModel vs a Trainer GPU broadcast TypeError involving NoneType.", "right": "issue:32090"}, {"accept": false, "left": "issue:43504", "reason": "Beit pretrained-preset/legacy-field loading is unrelated to Bark CPU-offload device mismatch.", "right": "issue:43606"}, {"accept": false, "left": "issue:28282", "reason": "ImportError on model instantiation is not the same as embeddings being reinitialized after resize when tie_word_embedding is false.", "right": "issue:35141"}, {"accept": false, "left": "issue:43577", "reason": "Both mention tensor dtype/attention behavior, but BLIP2 dtype propagation and the MPS attention head-dim correctness bug are different root causes.", "right": "issue:44554"}, {"accept": false, "left": "issue:44464", "reason": "Chunked generation with compiled forward and Qwen3.5 flash-attention generation bugs are separate model/runtime issues.", "right": "issue:44977"}, {"accept": false, "left": "issue:43329", "reason": "Undefined video-branch helpers in multimodal token counting is not the same as Qwen2.5-VL rope-index scaling for still-image positions.", "right": "issue:45325"}, {"accept": false, "left": "issue:43531", "reason": "Sliding_window behavior in Qwen3-MoE and a MobileLLM key error while loading are unrelated bugs.", "right": "issue:43742"}, {"accept": false, "left": "issue:43452", "reason": "Both are load-time failures, but one is a gguf_file regression in from_pretrained APIs and the other is a model-specific ScandiBERT loading failure; no clear shared code path.", "right": "issue:44451"}, {"accept": false, "left": "issue:43504", "reason": "Legacy preset field handling in Beit is unrelated to BigBirdTokenizer not registering mask token as special.", "right": "issue:43653"}, {"accept": 
false, "left": "issue:44849", "reason": "Qwen3.5 output_hidden_states behavior and incorrect visual encoder keys on save_pretrained affect different operations and likely different fixes.", "right": "issue:45357"}], "summary": "These issues are broadly Transformers/model-regression reports, but they do not resolve to a single duplicate bug cluster. Most overlaps are superficial keyword matches around loading, generation, or model families. I\u2019d use issue 43452 as the closest representative of the loading/pretrained subgroup, but none of the proposed soft pairs looks like the same underlying change or defect."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:28282|issue:32090", "issue:43504|issue:43606", "issue:28282|issue:35141", "issue:43577|issue:44554", "issue:44464|issue:44977", "issue:43329|issue:45325", "issue:43531|issue:43742", "issue:43452|issue:44451", "issue:43504|issue:43653", "issue:44849|issue:45357"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6498, "estimated_input_tokens": 3121, "item_count": 17, "node_count": 17, "serialized_chars": 12482, "soft_pair_count": 9}, "cached_at": "2026-04-14T16:00:16Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "0e1949d0bc085d6dc134becf81c4f8e5a2455b249ad930a3ab6b25ac1a3a2a5e", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:41720", "issue:43012", "issue:43334", "issue:43525", "issue:43540", "issue:43572", "issue:43577", "issue:43716", "issue:43742", "issue:43927", "issue:44361", "issue:44464", "issue:44479", "issue:44625", "issue:44821", "issue:45200", "issue:45229"], "result": {"analyst_result": 
{"best_issue_reason": null, "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.94, "soft_edge_verdicts": [{"accept": false, "left": "issue:43927", "reason": "Both involve token-related failures, but one is a DiaConfig save/load regression losing custom IDs and causing generation IndexError, while the other is an MLukeTokenizer AttributeError on tasks; different models and failure modes.", "right": "issue:44361"}, {"accept": false, "left": "issue:45200", "reason": "Gemma 4 text-only fine-tuning missing mm_token_type_ids is a config/defaulting bug, while the CUDA OOM report is a multi-GPU inference memory issue; unrelated code paths.", "right": "issue:45229"}, {"accept": false, "left": "issue:43012", "reason": "Both mention precision/dtype, but one is a PyTorch warning during compiling a bfloat16 model, and the other is Blip2 leaving dtype fields at float32 after load; different symptoms and fixes.", "right": "issue:43577"}, {"accept": false, "left": "issue:44479", "reason": "Qwen video-input regression across several models is an input-processing bug, whereas Qwen3.5 num_labels propagation is a config wiring issue; not the same underlying defect.", "right": "issue:44625"}, {"accept": false, "left": "issue:43334", "reason": "Both mention missing pad token fields, but Qwen3-VL pad_token_id and StableLmConfig pad_token_idx are separate model/config regressions with different attributes and downstream failures.", "right": "issue:43572"}, {"accept": false, "left": "issue:43540", "reason": "Qwen3OmniMoe video ValueError and Mistral-3 image preprocessor dtype mismatch are both multimodal issues, but they affect different modalities and code paths.", "right": "issue:43716"}, {"accept": false, "left": "issue:41720", "reason": "Qwen3 auto device mapping causing cudaErrorAssert is a model placement/runtime crash, while chunked generation inconsistency with compiled forward is a generation correctness issue; not duplicates.", 
"right": "issue:44464"}, {"accept": false, "left": "issue:43525", "reason": "Llama4 missing pad_token_id and StableLmConfig missing pad_token_idx are similar in shape, but they are distinct model-specific config attribute regressions.", "right": "issue:43572"}, {"accept": false, "left": "issue:43742", "reason": "MobileLLM-125M key error on load and AutoImageProcessor failing from URL are both loading issues, but they involve different components and error types.", "right": "issue:44821"}], "summary": "The cluster is heterogeneous: the proposed soft pairs share surface-level themes but describe different underlying bugs in different model families and code paths. No pair looks safe to merge as duplicates, and there is no single clear canonical issue for the whole set."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43927|issue:44361", "issue:45200|issue:45229", "issue:43012|issue:43577", "issue:44479|issue:44625", "issue:43334|issue:43572", "issue:43540|issue:43716", "issue:41720|issue:44464", "issue:43525|issue:43572", "issue:43742|issue:44821"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6850, "estimated_input_tokens": 3297, "item_count": 17, "node_count": 17, "serialized_chars": 13188, "soft_pair_count": 11}, "cached_at": "2026-04-14T16:00:53Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "3554ca5123a4a7508279eee07e1ba4ad5f7a1b5779ef50e6d36e8ff196b69e82", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:32090", "issue:33666", "issue:34567", "issue:36010", "issue:43262", "issue:43278", "issue:43493", "issue:43688", "issue:43756", "issue:43825", "issue:43844", 
"issue:43873", "issue:44060", "issue:44488", "issue:44843", "issue:44913", "issue:45084"], "result": {"analyst_result": {"best_issue_reason": "Issue 44843 is the most actionable and well-scoped issue here, with a precise reproduction and a clear fix target; it is the strongest representative item in an otherwise mixed cluster.", "best_pr_reason": null, "canonical_issue_reason": "Issue 44843 is the clearest standalone bug report in the set: it names a specific failing code path (_patch_mistral_regex / model_info) and a concrete failure mode (HF_HUB_OFFLINE breakage).", "canonical_pr_reason": null, "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "issue:32090", "reason": "Trainer `_gpu_broadcast_one` NoneType error and Qwen2-VL multi-GPU training are different problems; one is a concrete runtime exception, the other is a broad training capability issue.", "right": "issue:33666"}, {"accept": false, "left": "issue:32090", "reason": "Unsupported NoneType in GPU broadcast and `num_input_tokens_seen` not updating involve different Trainer internals and different symptoms.", "right": "issue:34567"}, {"accept": false, "left": "issue:43756", "reason": "Both mention rotary concepts, but one is a Smollm3 layer-selection bug and the other is a GPTNeoX config reload issue; different code paths and failure modes.", "right": "issue:44913"}, {"accept": false, "left": "issue:43278", "reason": "Embedding dtype mismatch during evaluation and auxiliary-loss normalization are unrelated training correctness bugs.", "right": "issue:43688"}, {"accept": false, "left": "issue:43493", "reason": "SigLIP2 HF-vs-JAX discrepancy is a model implementation bug; the pipeline translation-message issue is unrelated UI/error-text behavior.", "right": "issue:43825"}, {"accept": false, "left": "issue:43262", "reason": "Audio processor chat-template sample-rate defaulting and template compilation TypeError are different subsystems and failures.", "right": "issue:45084"}, {"accept": 
false, "left": "issue:44488", "reason": "Both touch model loading, but one is a missing model load and the other is offline-mode breakage in tokenizer patching; not the same bug.", "right": "issue:44843"}, {"accept": false, "left": "issue:43688", "reason": "Incorrect auxiliary-loss normalization and quantization/offloading behavior are distinct issues with no shared concrete code path.", "right": "issue:43873"}, {"accept": false, "left": "issue:43278", "reason": "BF16-to-FP32 dtype drift in evaluation is unrelated to the ZeRO-3 gradient increase problem.", "right": "issue:43844"}, {"accept": false, "left": "issue:33666", "reason": "Qwen2-VL multi-GPU training and GenerationMixin import failure are separate issues affecting different features.", "right": "issue:36010"}, {"accept": false, "left": "issue:43756", "reason": "Smollm3 RoPE-layer dropping and Qwen3-Next tied-weights warning are model-specific configuration bugs, not the same underlying defect.", "right": "issue:44060"}], "summary": "The cluster is heterogeneous: training/runtime errors, model-specific config bugs, tokenizer/offline behavior, audio processing, and quantization/offloading issues. 
None of the soft pairs look like the same underlying bug or change, so all soft edges should be rejected."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:32090|issue:33666", "issue:32090|issue:34567", "issue:43756|issue:44913", "issue:43278|issue:43688", "issue:43493|issue:43825", "issue:43262|issue:45084", "issue:44488|issue:44843", "issue:43688|issue:43873", "issue:43278|issue:43844", "issue:33666|issue:36010", "issue:43756|issue:44060"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6574, "estimated_input_tokens": 3159, "item_count": 17, "node_count": 17, "serialized_chars": 12636, "soft_pair_count": 9}, "cached_at": "2026-04-14T16:01:29Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "4d2328fc7fb260bfee4bfba422d0635914ffcd312f16dd3ad496ec7bd8323f34", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:33666", "issue:36331", "issue:39692", "issue:43257", "issue:43299", "issue:43493", "issue:43540", "issue:43824", "issue:43844", "issue:44423", "issue:44589", "issue:44734", "issue:44964", "issue:44977", "issue:45072", "issue:45198", "issue:45405"], "result": {"analyst_result": {"best_issue_reason": "issue:43257 is the most clearly scoped, reproducible bug report in the set, but only as a standalone issue\u2014not as a cluster canonical.", "best_pr_reason": null, "canonical_issue_reason": "There is no single canonical issue because these reports do not describe the same underlying bug; they span different model families and failure modes.", "canonical_pr_reason": null, "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "issue:43257", "reason": "Both involve 
Qwen3 MoE loading, but one is an accelerate+deepspeed weight conversion problem and the other is a transformers version/model-loading regression for Qwen3VL MoE. Different model variants and different code paths.", "right": "issue:43299"}, {"accept": false, "left": "issue:45072", "reason": "Different model families and failures: dtype mismatch in SwitchTransformers/TimmWrapperModel inference versus save_pretrained/tokenization failure in Wav2Vec2. Not the same bug.", "right": "issue:45198"}, {"accept": false, "left": "issue:43540", "reason": "Both are Qwen3-family multimodal reports, but one is a video-input ValueError in Qwen3OmniMoe and the other is a flash-attention generation issue in Qwen3.5. Too different to merge.", "right": "issue:44977"}, {"accept": false, "left": "issue:44423", "reason": "Both are serve-related crashes, but one is a multimodal continuous-batching AttributeError and the other is a KV-cache continuation tensor-indexing bug on /v1/responses. Distinct endpoints and root causes.", "right": "issue:44734"}, {"accept": false, "left": "issue:43493", "reason": "Completely different problems: SigLIP2 HF/JAX discrepancy versus gradient blow-up under HfDeepSpeedConfig + ZeRO-3. No shared underlying bug.", "right": "issue:43844"}, {"accept": false, "left": "issue:44964", "reason": "One is a model-load failure for Phi-4-multimodal-instruct; the other is a packaging/version pin issue with MIN_PEFT_VERSION. Different cause and fix.", "right": "issue:45405"}, {"accept": false, "left": "issue:39692", "reason": "Same model family only at a high level, but one is a broken documentation example and the other is an implementation discrepancy versus JAX. Not the same underlying issue.", "right": "issue:43493"}, {"accept": false, "left": "issue:33666", "reason": "Multi-GPU training for Qwen2-VL versus a CustomTrainer signature incompatibility. 
No evidence they are the same bug.", "right": "issue:36331"}, {"accept": false, "left": "issue:43824", "reason": "Unrelated failures: missing Qwen2_5_VL import versus missing Float8_e4m3fnStorage. Different symbols, different subsystems.", "right": "issue:44589"}], "summary": "No duplicate cluster here: the items are a mix of unrelated issues across different models, training/runtime paths, and docs/example failures. All soft pairs are best treated as separate reports."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43257|issue:43299", "issue:45072|issue:45198", "issue:43540|issue:44977", "issue:44423|issue:44734", "issue:43493|issue:43844", "issue:44964|issue:45405", "issue:39692|issue:43493", "issue:33666|issue:36331", "issue:43824|issue:44589"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6932, "estimated_input_tokens": 3338, "item_count": 18, "node_count": 18, "serialized_chars": 13349, "soft_pair_count": 9}, "cached_at": "2026-04-14T16:02:09Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "108ccc9502e9e2a7986ba7c97b99fb85f2ec378db9d2280bf3057a46ebca3ac0", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:33357", "issue:33666", "issue:39401", "issue:39692", "issue:43295", "issue:43329", "issue:43572", "issue:43606", "issue:43646", "issue:43688", "issue:43746", "issue:43994", "issue:44291", "issue:44466", "issue:44493", "issue:44521", "issue:44568", "issue:45290"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.9, "soft_edge_verdicts": [{"accept": false, 
"left": "issue:44521", "reason": "Both involve apply_chat_template, but one is about all-zero assistant masks for multimodal inputs while the other is a crash on tool-call assistant messages with no content; different failure modes and inputs.", "right": "issue:45290"}, {"accept": false, "left": "issue:43572", "reason": "Both are v5 regressions in loading/model init, but one is a missing StableLmConfig field and the other is an unexpected _is_hf_initialized argument in init_empty_weights; separate code paths.", "right": "issue:44291"}, {"accept": false, "left": "issue:43688", "reason": "Different models and bugs: auxiliary loss normalization vs PEFT-adapter loading from local checkpoints.", "right": "issue:43746"}, {"accept": false, "left": "issue:43646", "reason": "One is custom model initialization breaking in v5; the other is device-dependent lm_head.weight serialization/tied-weights behavior. Not the same change.", "right": "issue:44466"}, {"accept": false, "left": "issue:43994", "reason": "SigLIP2 wrong outputs with AutoModel/pipeline is not the same as widespread unexpected position_id keys; symptoms and affected paths differ.", "right": "issue:44493"}, {"accept": false, "left": "issue:43329", "reason": "Multimodal video token counting bug vs CPU-offload device mismatch in bark-small; unrelated subsystems and fixes.", "right": "issue:43606"}, {"accept": false, "left": "issue:33357", "reason": "MacOS CLIP bus error and Qwen2-VL multi-GPU training are unrelated.", "right": "issue:33666"}, {"accept": false, "left": "issue:39692", "reason": "Both touch multimodal processor/tokenizer usage, but 39692 is a SigLIP2 docs example with model/processor mismatch plus quantization failure, while 43295 is a regression in processor.tokenizer/image passing; not the same concrete bug.", "right": "issue:43295"}, {"accept": false, "left": "issue:39401", "reason": "Wrong offset_mapping in Qwen3 tokenizer and missing BOS/EOS with add_special_tokens are distinct tokenizer bugs.", 
"right": "issue:44568"}], "summary": "The items are largely unrelated; none of the soft pairs look like the same underlying bug/change, so there is no clear canonical issue or PR for this set."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44521|issue:45290", "issue:43572|issue:44291", "issue:43688|issue:43746", "issue:43646|issue:44466", "issue:43994|issue:44493", "issue:43329|issue:43606", "issue:33357|issue:33666", "issue:39692|issue:43295", "issue:39401|issue:44568"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6646, "estimated_input_tokens": 3195, "item_count": 17, "node_count": 17, "serialized_chars": 12780, "soft_pair_count": 10}, "cached_at": "2026-04-14T16:02:40Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "74e8b5264ad67d91e51ed59461b0d2af57142e173889fadeeb4389da5f232c09", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:15354", "issue:38175", "issue:43232", "issue:43257", "issue:43278", "issue:43540", "issue:43575", "issue:43582", "issue:43701", "issue:43844", "issue:44315", "issue:44792", "issue:44849", "issue:44964", "issue:44977", "issue:45200", "issue:45405"], "result": {"analyst_result": {"best_issue_reason": "Issue 43257 is the most discussion-heavy and has the most inbound references, so it is the best anchor if one must be chosen, but it is not a duplicate of the others.", "best_pr_reason": null, "canonical_issue_reason": "No single canonical issue stands out because the issues target different concrete failures rather than one shared root cause.", "canonical_pr_reason": null, "confidence": 0.94, "soft_edge_verdicts": [{"accept": false, "left": 
"issue:43575", "reason": "OOM during tensor-parallel model load vs. an unreleased PEFT version requirement; different failure modes and code paths.", "right": "issue:45405"}, {"accept": false, "left": "issue:43232", "reason": "Generation kwargs update after sync_gpus vs. Qwen3.5 output_hidden_states behavior; both are generation-related but not the same bug.", "right": "issue:44849"}, {"accept": false, "left": "issue:44792", "reason": "Janus image-generation test failure is unrelated to a PEFT version bump.", "right": "issue:45405"}, {"accept": false, "left": "issue:43257", "reason": "Qwen3 MOE weight conversion with accelerate/deepspeed is unrelated to dependency versioning.", "right": "issue:45405"}, {"accept": false, "left": "issue:15354", "reason": "TorchScript export failure for ViT GeneratorExp vs. zero probabilities in SigLIP2; distinct models and symptoms.", "right": "issue:38175"}, {"accept": false, "left": "issue:43540", "reason": "Video input validation in Qwen3OmniMoe vs. Gemma 4 token-type defaults; different models and different inputs.", "right": "issue:45200"}, {"accept": false, "left": "issue:43278", "reason": "Embedding dtype drift in eval vs. checkpoint key mismatch; separate training/runtime issues.", "right": "issue:43701"}, {"accept": false, "left": "issue:44315", "reason": "Liger Kernel application with model_init is a setup/initialization issue, not the same as Qwen3.5 hidden_states output bug.", "right": "issue:44849"}, {"accept": false, "left": "issue:43844", "reason": "ZeRO-3 gradient anomaly during training vs. loading Phi-4 multimodal on latest transformers; unrelated code paths.", "right": "issue:44964"}, {"accept": false, "left": "issue:43582", "reason": "Apple Silicon caching allocator TypeError vs. 
Qwen3.5 flash-attention generation bug; no shared underlying defect.", "right": "issue:44977"}], "summary": "These items are a loose collection of unrelated issues across different models, generation paths, training configs, and platform/version problems. I do not see any pair that looks like the same underlying bug or change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43575|issue:45405", "issue:43232|issue:44849", "issue:44792|issue:45405", "issue:43257|issue:45405", "issue:15354|issue:38175", "issue:43540|issue:45200", "issue:43278|issue:43701", "issue:44315|issue:44849", "issue:43844|issue:44964", "issue:43582|issue:44977"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6628, "estimated_input_tokens": 3186, "item_count": 17, "node_count": 17, "serialized_chars": 12743, "soft_pair_count": 9}, "cached_at": "2026-04-14T16:03:09Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "ea73548125ef9590f1fb02f7096ccfb67591ebb37e9340ebe13f0090fbdd913a", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:37428", "issue:43334", "issue:43525", "issue:43606", "issue:43688", "issue:43824", "issue:43957", "issue:44112", "issue:44442", "issue:44534", "issue:44554", "issue:44898", "issue:45198", "issue:45200", "issue:45325", "issue:45356", "issue:45412"], "result": {"analyst_result": {"best_issue_reason": "If one issue must anchor the cluster, 43957 is the broadest user-facing regression because it affects model loading across multiple models and has the most general impact.", "best_pr_reason": null, "canonical_issue_reason": "No clear canonical issue exists for the cluster as a whole; 
the reports are mostly unrelated, so any single issue would be a poor representative.", "canonical_pr_reason": null, "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "issue:37428", "reason": "Different failures: one is a missing internal flash-attn helper import, the other is a meta-device model loading/config bug. Same library, but not the same code-path problem.", "right": "issue:43957"}, {"accept": false, "left": "issue:43606", "reason": "Both mention device/CI issues, but the affected models and failure modes differ: CPU offload mismatch in Bark vs stale device override in GraniteSpeech.", "right": "issue:44112"}, {"accept": false, "left": "issue:43688", "reason": "Unrelated bugs: auxiliary loss normalization vs non-persistent buffers being initialized with junk. No shared concrete code path.", "right": "issue:44534"}, {"accept": false, "left": "issue:45200", "reason": "Different model families and distinct bugs: Gemma 4 text-only mm_token_type_ids defaulting vs Qwen2.5-VL rope index scaling.", "right": "issue:45325"}, {"accept": false, "left": "issue:44554", "reason": "Completely different areas: MPS attention correctness vs tokenizer codec/regex regression. 
Not mergeable as one fix.", "right": "issue:45356"}, {"accept": false, "left": "issue:43334", "reason": "Very similar symptom class (missing pad_token_id on config), but these are separate model configs and not clearly the same underlying bug.", "right": "issue:43525"}, {"accept": false, "left": "issue:44442", "reason": "Both are tokenizer-related, but they affect different audio models with different breakages (AutoTokenizer loading vs save_pretrained/tokenization).", "right": "issue:45198"}, {"accept": false, "left": "issue:37428", "reason": "Both are import errors, but one is a missing helper function and the other a missing model class; the concrete code paths differ.", "right": "issue:43824"}, {"accept": false, "left": "issue:44898", "reason": "Different bug classes: image positional interpolation failure vs memory not being released on deletion.", "right": "issue:45412"}], "summary": "This cluster is heterogeneous: it mixes unrelated import/export errors, model-config attribute errors, tokenizer issues, attention correctness, memory lifecycle, and CI/device regressions. 
None of the soft pairs look like the same underlying bug with high enough confidence to merge."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:37428|issue:43957", "issue:43606|issue:44112", "issue:43688|issue:44534", "issue:45200|issue:45325", "issue:44554|issue:45356", "issue:43334|issue:43525", "issue:44442|issue:45198", "issue:37428|issue:43824", "issue:44898|issue:45412"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6586, "estimated_input_tokens": 3165, "item_count": 17, "node_count": 17, "serialized_chars": 12658, "soft_pair_count": 9}, "cached_at": "2026-04-14T16:03:45Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "ee2aff2cdca8e6587dccdd3304500556d7f641de59feb507265b892a3b5383ee", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:40990", "issue:43295", "issue:43388", "issue:43493", "issue:43526", "issue:43572", "issue:43701", "issue:43844", "issue:44360", "issue:44466", "issue:44561", "issue:44589", "issue:44610", "issue:44625", "issue:44936", "issue:44987", "issue:45127"], "result": {"analyst_result": {"best_issue_reason": "Issue 44625 is the best overall issue to keep as the cluster representative because it is central, still open, and has broad downstream interest.", "best_pr_reason": null, "canonical_issue_reason": "Issue 44625 is the strongest representative: it is open, has the highest inbound reference count in this set, and describes a concrete actionable regression about config propagation.", "canonical_pr_reason": null, "confidence": 0.94, "soft_edge_verdicts": [{"accept": false, "left": "issue:44466", "reason": "Both involve tied weights and model 
collapse/serialization, but one is about device-dependent serialization of lm_head.weight and the other is about LoRA merge behavior with extended vocab; different bugs and code paths.", "right": "issue:45127"}, {"accept": false, "left": "issue:43388", "reason": "Both mention labels, but one is a Trainer/gather_for_metrics batching bug and the other is a BeitImageProcessorFast reduce_labels bug; not the same underlying defect.", "right": "issue:43526"}, {"accept": false, "left": "issue:43295", "reason": "These are different regressions in processor handling: one breaks processor.tokenizer/image passing, the other has a processor output size mismatch for OmDet-Turbo.", "right": "issue:44610"}, {"accept": false, "left": "issue:43493", "reason": "SigLIP2 implementation discrepancy and resume_from_checkpoint key mismatch are unrelated problems with no shared code-path indication.", "right": "issue:43701"}, {"accept": false, "left": "issue:40990", "reason": "Perplexity on gpt-oss-20b and Qwen3.5 num_labels propagation are completely different model/config issues.", "right": "issue:44625"}, {"accept": false, "left": "issue:44360", "reason": "DSA indexer activation behavior and removal of is_torch_fx_available affecting trust_remote_code models are unrelated.", "right": "issue:44561"}, {"accept": false, "left": "issue:43572", "reason": "Both are version-related model loading issues, but the specific failures and affected components differ; not the same bug.", "right": "issue:44987"}, {"accept": false, "left": "issue:43572", "reason": "Missing pad_token_idx in StableLmConfig and Float8 storage lookup failure are distinct runtime/configuration problems.", "right": "issue:44589"}, {"accept": false, "left": "issue:43844", "reason": "ZeRO-3 gradient inflation and trainer.evaluate() failing after trainer.train() are different training/runtime behaviors with no concrete shared defect.", "right": "issue:44936"}], "summary": "The cluster is a loose set of unrelated bug reports 
with only superficial overlap in a few titles. None of the soft pairs look like the same underlying bug or change, so no duplicates should be merged."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44466|issue:45127", "issue:43388|issue:43526", "issue:43295|issue:44610", "issue:43493|issue:43701", "issue:40990|issue:44625", "issue:44360|issue:44561", "issue:43572|issue:44987", "issue:43572|issue:44589", "issue:43844|issue:44936"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6982, "estimated_input_tokens": 3363, "item_count": 18, "node_count": 18, "serialized_chars": 13450, "soft_pair_count": 10}, "cached_at": "2026-04-14T16:05:39Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "b028651ae7b9a5e7a97693557ab485f627444bc9c212190d8a7db14d12835f14", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:38175", "issue:43278", "issue:43295", "issue:43450", "issue:43493", "issue:43606", "issue:43827", "issue:43856", "issue:43901", "issue:43994", "issue:44462", "issue:44561", "issue:44568", "issue:44610", "issue:44779", "issue:44857", "issue:44991", "issue:45083"], "result": {"analyst_result": {"best_issue_reason": "issue:43493 is the strongest representative of the only true duplicate-like theme in this set and is the most likely root-cause report for the SigLIP2 failures.", "best_pr_reason": null, "canonical_issue_reason": "issue:43493 is the best canonical issue because it describes the broadest likely root cause in the SigLIP2 subcluster: a mismatch between HF and the original JAX implementation. 
That makes it a better umbrella than the more symptom-specific zero-probability report.", "canonical_pr_reason": null, "confidence": 0.78, "soft_edge_verdicts": [{"accept": false, "left": "issue:43278", "reason": "Different problems: one is a dtype regression in evaluation, the other is memory inefficiency during Qwen3 MoE training.", "right": "issue:43856"}, {"accept": false, "left": "issue:44462", "reason": "Both involve v5-era tokenizer/model loading behavior, but one is about ignoring tokenizer.json while the other is about a removed utility breaking trust_remote_code models.", "right": "issue:44561"}, {"accept": false, "left": "issue:43295", "reason": "Different modalities and failures: processor.tokenizer/image passing regression vs batched video processor output shape.", "right": "issue:43450"}, {"accept": false, "left": "issue:43295", "reason": "The titles point to different failures: a processor API regression versus SigLIP2 producing bad outputs. Not the same underlying bug.", "right": "issue:43994"}, {"accept": false, "left": "issue:44568", "reason": "Both are tokenizer-related, but they are model-specific loading/behavior issues for different repositories and likely different causes.", "right": "issue:44991"}, {"accept": false, "left": "issue:43827", "reason": "Both are docs-related, but they target different outdated references and different pipeline guidance.", "right": "issue:43901"}, {"accept": false, "left": "issue:43606", "reason": "Device-mismatch CPU offload bug is unrelated to the qwen3_omni_moe helper function behavior.", "right": "issue:45083"}, {"accept": false, "left": "issue:44561", "reason": "Both are v5 regressions, but one is a removed API breaking trust_remote_code while the other is a tokenizer correctness issue for DeepSeek.", "right": "issue:44779"}, {"accept": false, "left": "issue:44610", "reason": "Different code paths: OmDet-Turbo preprocessing size mismatch versus an AMP/CUDA crash in LwDetrImageLoss.", "right": "issue:44857"}, 
{"accept": true, "left": "issue:38175", "reason": "Same SigLIP2 family and same symptom class: incorrect outputs compared with expected behavior/JAX implementation. The zero-probability report looks like a concrete manifestation of the broader discrepancy.", "right": "issue:43493"}], "summary": "Mostly a heterogeneous cluster with several unrelated bug reports. The only clear duplicate-like pair is the SigLIP2 implementation discrepancy/incorrect output reports; the rest differ by model, subsystem, or root cause and should stay separate."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43278|issue:43856", "issue:44462|issue:44561", "issue:43295|issue:43450", "issue:43295|issue:43994", "issue:44568|issue:44991", "issue:43827|issue:43901", "issue:43606|issue:45083", "issue:44561|issue:44779", "issue:44610|issue:44857", "issue:38175|issue:43493"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6992, "estimated_input_tokens": 3368, "item_count": 18, "node_count": 18, "serialized_chars": 13470, "soft_pair_count": 10}, "cached_at": "2026-04-14T16:06:13Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "750a72981adc46f4edc68d3d482533634b3a75413bd95933d17d24525ea1e881", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:37428", "issue:43425", "issue:43450", "issue:43493", "issue:43575", "issue:43653", "issue:44112", "issue:44186", "issue:44351", "issue:44448", "issue:44488", "issue:44568", "issue:44743", "issue:45127", "issue:45198", "issue:45245", "issue:45290", "issue:45381"], "result": {"analyst_result": {"best_issue_reason": "issue:44488 has the clearest general 
compatibility/loading symptom and a substantial report, making it the most reasonable single issue to anchor the cluster despite the lack of real duplication.", "best_pr_reason": null, "canonical_issue_reason": "issue:44488 is the closest thing to a broadly user-facing runtime loading regression, but the cluster is heterogeneous, so it is only a weak representative rather than a true duplicate hub.", "canonical_pr_reason": null, "confidence": 0.88, "soft_edge_verdicts": [{"accept": false, "left": "issue:45127", "reason": "Different failure classes: LoRA merge/tied embeddings collapse vs category-cardinality RuntimeError. No shared code-path or fix target.", "right": "issue:45245"}, {"accept": false, "left": "issue:44488", "reason": "A model loading failure and a tokenizer special-token regression are distinct bugs affecting different paths and likely different fixes.", "right": "issue:44568"}, {"accept": false, "left": "issue:43450", "reason": "Both are vision-related, but one is a batched video processor shape bug and the other is a SigLIP2 implementation discrepancy; not the same underlying issue.", "right": "issue:43493"}, {"accept": false, "left": "issue:45290", "reason": "Chat-template tool-call handling and qwen2.5-vl video position IDs are separate mechanisms with different symptoms and code paths.", "right": "issue:45381"}, {"accept": false, "left": "issue:37428", "reason": "Importing a private flash-attention helper and Torch 2.10 incompatibility are unrelated breakages; one is a symbol import error, the other a version compatibility issue.", "right": "issue:43425"}, {"accept": false, "left": "issue:43653", "reason": "BigBirdTokenizer special-token registration and a GraniteSpeech CI stale device override are unrelated; one is a tokenizer bug, the other a test flake.", "right": "issue:44112"}, {"accept": false, "left": "issue:37428", "reason": "Both are import errors, but they involve different missing symbols/modules and different underlying 
regressions.", "right": "issue:44351"}, {"accept": false, "left": "issue:44186", "reason": "LayoutLMv2 NER/batched padding crashes and Wav2Vec2 save_pretrained/tokenization failures are model-specific tokenizer issues, not the same defect.", "right": "issue:45198"}, {"accept": false, "left": "issue:44448", "reason": "Pegasus output changes across versions and Qwen3.5 recurrent-state reset behavior are unrelated model-specific correctness bugs.", "right": "issue:44743"}, {"accept": false, "left": "issue:43450", "reason": "A batched video processor shape bug and a tensor-parallel OOM when loading Qwen2-57B-A14B are different problems with different remediation paths.", "right": "issue:43575"}], "summary": "This cluster is mostly a grab bag of unrelated Transformers bugs: import errors, tokenizer regressions, vision/video shape issues, CI flakes, and model-specific loading/runtime failures. None of the proposed soft pairs look like true duplicates or mergeable as the same underlying fix."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:45127|issue:45245", "issue:44488|issue:44568", "issue:43450|issue:43493", "issue:45290|issue:45381", "issue:37428|issue:43425", "issue:43653|issue:44112", "issue:37428|issue:44351", "issue:44186|issue:45198", "issue:44448|issue:44743", "issue:43450|issue:43575"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6634, "estimated_input_tokens": 3189, "item_count": 17, "node_count": 17, "serialized_chars": 12754, "soft_pair_count": 9}, "cached_at": "2026-04-14T16:07:16Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": 
"5f6e000aff7bef02e6104bad5d7d555e64208013dee2cd772240cb58a215144d", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:29942", "issue:32090", "issue:39692", "issue:43262", "issue:43278", "issue:43404", "issue:43450", "issue:43526", "issue:43819", "issue:43825", "issue:44112", "issue:44265", "issue:44855", "issue:45083", "issue:45198", "issue:45200", "issue:45381"], "result": {"analyst_result": {"best_issue_reason": "No issue is a strong global representative for the cluster because none of the items plausibly subsumes the others as the same bug.", "best_pr_reason": null, "canonical_issue_reason": "No single issue is a good canonical for this cluster; the issues describe unrelated failures in different models, modalities, and code paths.", "canonical_pr_reason": null, "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "issue:39692", "reason": "Different bugs in different image processors: SigLIP2 docs example errors vs BeitImageProcessorFast label reduction.", "right": "issue:43526"}, {"accept": false, "left": "issue:29942", "reason": "Unrelated failures: Flash Attention 2 test failure vs Trainer/_gpu_broadcast_one NoneType error.", "right": "issue:32090"}, {"accept": false, "left": "issue:43278", "reason": "Different problem classes: dtype drift during evaluate vs an incorrect pipeline error message about translation support.", "right": "issue:43825"}, {"accept": false, "left": "issue:44265", "reason": "Different code paths and symptoms: torch.export with torch_compilable_check vs Wav2Vec2 save_pretrained/tokenization failure.", "right": "issue:45198"}, {"accept": false, "left": "issue:43819", "reason": "DAC latent/forward mismatch is unrelated to a GraniteSpeech CI stale device override test.", "right": "issue:44112"}, {"accept": false, "left": "issue:44855", "reason": "Python 3.13 import/parser issue in DebertaV2Model is unrelated to qwen3_omni_moe helper output-length behavior.", "right": "issue:45083"}, {"accept": false, "left": 
"issue:43262", "reason": "Audio chat-template sampling-rate default bug is unrelated to batched video processor shape errors.", "right": "issue:43450"}, {"accept": false, "left": "issue:45200", "reason": "Different multimodal issues: Gemma 4 text-only fine-tuning token IDs vs qwen2.5-vl video position IDs.", "right": "issue:45381"}, {"accept": false, "left": "issue:43404", "reason": "Different underlying bugs: untied lm_head weights in Mistral3 vs missing/default mm_token_type_ids in Gemma 4.", "right": "issue:45200"}], "summary": "The cluster is not a true duplicate set: the items span unrelated bugs across audio, vision, text generation, export, CI, and docs. All soft-similarity pairs are superficial and should be rejected."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:39692|issue:43526", "issue:29942|issue:32090", "issue:43278|issue:43825", "issue:44265|issue:45198", "issue:43819|issue:44112", "issue:44855|issue:45083", "issue:43262|issue:43450", "issue:45200|issue:45381", "issue:43404|issue:45200"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6794, "estimated_input_tokens": 3269, "item_count": 17, "node_count": 17, "serialized_chars": 13075, "soft_pair_count": 11}, "cached_at": "2026-04-14T16:09:54Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "7444c85b3a6e012a47f9d1a3782caf9c4435d97d646805673429c4cb0fd09658", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:38175", "issue:39692", "issue:43066", "issue:43335", "issue:43450", "issue:43526", "issue:43550", "issue:43611", "issue:43819", "issue:43931", "issue:44448", "issue:44462", "issue:44493", "issue:44779", 
"issue:44977", "issue:45083", "issue:45229"], "result": {"analyst_result": {"best_issue_reason": "issue:44493 is the best single issue to anchor this cluster because it spans many models and describes a general loading/behavior regression rather than a narrow model-specific defect.", "best_pr_reason": null, "canonical_issue_reason": "issue:44493 is the broadest, most cross-cutting regression report in the set and best represents the cluster\u2019s general v5 compatibility theme.", "canonical_pr_reason": null, "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "issue:43611", "reason": "Both are loading failures, but one is a base_model_prefix regression and the other is a Qwen3-VL shape mismatch; different failure modes and code paths.", "right": "issue:43931"}, {"accept": false, "left": "issue:39692", "reason": "SigLIP2 docs example errors/quantization mismatch is a documentation/example problem, not the same as the position-id unexpected-key regression.", "right": "issue:44493"}, {"accept": false, "left": "issue:43819", "reason": "DAC.from_latents mismatch is a model-internal forward-path bug, while 44493 is a broader state-dict key regression; not the same issue.", "right": "issue:44493"}, {"accept": false, "left": "issue:44448", "reason": "Both mention v5 regressions, but they affect different models and subsystems (Pegasus generation vs Deepseek tokenizer behavior).", "right": "issue:44779"}, {"accept": false, "left": "issue:38175", "reason": "Unexpected zero probabilities in SigLIP2 is a model output issue, not the same as a load-time unexpected key problem.", "right": "issue:44493"}, {"accept": false, "left": "issue:44977", "reason": "Both concern Gemma4 inference, but one reports flash-attention generation behavior and the other CUDA OOM; different underlying problems.", "right": "issue:45229"}, {"accept": false, "left": "issue:43450", "reason": "Both are processor shape/array handling bugs, but they are in different processor classes and 
different transformations.", "right": "issue:43526"}, {"accept": false, "left": "issue:43550", "reason": "Bamba torch.compile with SDPA and qwen3_omni_moe feature-length helper behavior are unrelated code paths and symptoms.", "right": "issue:45083"}, {"accept": false, "left": "issue:43066", "reason": "Both involve tokenization, but one is a wrong decoder type in v5 and the other is AutoTokenizer ignoring tokenizer.json; distinct bugs.", "right": "issue:44462"}, {"accept": false, "left": "issue:43335", "reason": "SwitchTransformers sparse-layer config creation and qwen3_omni_moe feature-length calculation are unrelated implementation areas.", "right": "issue:45083"}, {"accept": false, "left": "issue:43335", "reason": "One is a config construction bug, the other a torch.compile/SDPA runtime failure; not the same underlying defect.", "right": "issue:43550"}], "summary": "These issues are mostly unrelated model- or feature-specific bugs that happen to share broad Transformers-v5 regression language. 
None of the soft pairs look like the same underlying bug/change closely enough to merge."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43611|issue:43931", "issue:39692|issue:44493", "issue:43819|issue:44493", "issue:44448|issue:44779", "issue:38175|issue:44493", "issue:44977|issue:45229", "issue:43450|issue:43526", "issue:43550|issue:45083", "issue:43066|issue:44462", "issue:43335|issue:45083", "issue:43335|issue:43550"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7096, "estimated_input_tokens": 3420, "item_count": 18, "node_count": 18, "serialized_chars": 13678, "soft_pair_count": 10}, "cached_at": "2026-04-14T16:10:28Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "4f85a8aa10561a1a20af7167ce843f9e1203e28789b3d5cdd7c9119025b5e607", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:29127", "issue:32090", "issue:40444", "issue:43335", "issue:43504", "issue:43526", "issue:43606", "issue:43653", "issue:43723", "issue:44448", "issue:44554", "issue:44857", "issue:44898", "issue:44977", "issue:45072", "issue:45198", "issue:45372", "issue:45406"], "result": {"analyst_result": {"best_issue_reason": "issue:40444 is the strongest representative on metadata alone: it has the most discussion, a clear user-facing failure mode, and enough detail to stand in as the cluster\u2019s anchor if one must be chosen.", "best_pr_reason": null, "canonical_issue_reason": "issue:40444 is the most active and detailed item in the set (highest discussion activity, substantial body, concrete repro), so it is the best provisional anchor even though the cluster does not look cohesive.", 
"canonical_pr_reason": null, "confidence": 0.95, "soft_edge_verdicts": [{"accept": false, "left": "issue:29127", "reason": "Different failures in different components: LayoutLMv3 box/error messaging vs Trainer/_gpu_broadcast_one NoneType handling. No shared code-path bug.", "right": "issue:32090"}, {"accept": false, "left": "issue:40444", "reason": "Different models and symptoms: Qwen2.5-VL iterable dataset with multiple images vs BEiT Fast image processor reduce_labels returning the wrong shape/value. Same broad vision area, but not the same bug.", "right": "issue:43526"}, {"accept": false, "left": "issue:45372", "reason": "Both mention Gemma 4, but one is an ImportError from mistral_common and the other is a missing _tokenizer attribute in serve. These are distinct failures and not clearly one fix.", "right": "issue:45406"}, {"accept": false, "left": "issue:44857", "reason": "Both are dtype/precision-related, but they affect different models and different execution paths (LwDetr loss crash on float16 AMP vs SwitchTransformers/TimmWrapperModel bfloat16 inference mismatch).", "right": "issue:45072"}, {"accept": false, "left": "issue:43723", "reason": "Tokenizer loading in v5 and an MPS attention correctness issue are unrelated bugs in different subsystems.", "right": "issue:44554"}, {"accept": false, "left": "issue:44448", "reason": "Both are generation regressions, but they concern different models and likely different causes (v4/v5 Pegasus output drift vs Qwen3.5 flash-attention behavior).", "right": "issue:44977"}, {"accept": false, "left": "issue:43335", "reason": "SwitchTransformers config bug and BigBird tokenizer special-token registration are unrelated; only superficial transformer-model overlap.", "right": "issue:43653"}, {"accept": false, "left": "issue:43606", "reason": "CPU-offload device mismatch for bark-small is a different failure mode from LwDetrImageLoss float16/CUDA crash.", "right": "issue:44857"}, {"accept": false, "left": "issue:44898", 
"reason": "Perceiver image-classification positional-encoding failure and Wav2Vec2 save_pretrained/tokenization failure are different modalities and code paths.", "right": "issue:45198"}, {"accept": false, "left": "issue:43335", "reason": "Both involve BEiT/SwitchTransformers-style config/loading problems only at a very broad level; the actual bugs are different (sparse-layer creation vs legacy-field pretrained loading).", "right": "issue:43504"}], "summary": "The cluster is highly heterogeneous: the soft-similarity pairs mostly share only broad subsystem words (model, tokenizer, dtype, Gemma, etc.) but appear to describe different bugs. I would not merge any of the proposed soft edges."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:29127|issue:32090", "issue:40444|issue:43526", "issue:45372|issue:45406", "issue:44857|issue:45072", "issue:43723|issue:44554", "issue:44448|issue:44977", "issue:43335|issue:43653", "issue:43606|issue:44857", "issue:44898|issue:45198", "issue:43335|issue:43504"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6902, "estimated_input_tokens": 3323, "item_count": 17, "node_count": 17, "serialized_chars": 13290, "soft_pair_count": 11}, "cached_at": "2026-04-14T16:11:10Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "3fb24c522a9cf32d82c67a4782fa39545032c02c11430f65253af4033a17bb0a", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:34689", "issue:35141", "issue:37428", "issue:41720", "issue:43278", "issue:43329", "issue:43335", "issue:43550", "issue:43653", "issue:43720", "issue:43873", "issue:44112", "issue:44493", "issue:44987", "issue:45072", 
"issue:45083", "issue:45229"], "result": {"analyst_result": {"best_issue_reason": "#43873 is the best single representative because it is active, clearly reproducible, and broad enough to stand in for the cluster, while the other reports are narrower and more model-specific.", "best_pr_reason": null, "canonical_issue_reason": "No true duplicate cluster emerges; if one issue must represent the set, #43873 is the broadest open runtime bug and the most representative of the user-facing failures here.", "canonical_pr_reason": null, "confidence": 0.94, "soft_edge_verdicts": [{"accept": false, "left": "issue:37428", "reason": "Both involve loading errors, but they are different failures: a missing flash-attention import versus a model load failure on a specific repo/version path.", "right": "issue:44987"}, {"accept": false, "left": "issue:43335", "reason": "Unrelated bugs: sparse-layer creation for zero sparse layers vs a stale device-override CI failure in GraniteSpeech.", "right": "issue:44112"}, {"accept": false, "left": "issue:43653", "reason": "Different code paths and symptoms: tokenizer special-token decode behavior vs feature-length helper behavior in qwen3_omni_moe.", "right": "issue:45083"}, {"accept": false, "left": "issue:43335", "reason": "These are distinct issues: SwitchTransformers layer construction bug versus dtype mismatch in bfloat16 inference for different models.", "right": "issue:45072"}, {"accept": false, "left": "issue:43873", "reason": "Both mention memory/runtime problems, but quantization offloading is not the same as multi-GPU CUDA OOM on Gemma4.", "right": "issue:45229"}, {"accept": false, "left": "issue:43550", "reason": "Different failures and triggers: torch.compile with SDPA on Bamba-9B-v2 versus a CI device-override regression in GraniteSpeech.", "right": "issue:44112"}, {"accept": false, "left": "issue:43873", "reason": "Quantization/offloading behavior is unrelated to unexpected position-id key errors during model loading.", "right": 
"issue:44493"}, {"accept": false, "left": "issue:34689", "reason": "Both concern loading/embedding behavior, but one is a Llama 3.2 vision model load break and the other is post-init reinitialization after resizing embeddings.", "right": "issue:35141"}, {"accept": false, "left": "issue:41720", "reason": "Different model-specific GPU failures: Qwen3 auto device mapping cuda assert versus Gemma4 multi-GPU OOM.", "right": "issue:45229"}, {"accept": false, "left": "issue:43720", "reason": "Packed-weight unpacking during accelerate loading is a distinct bug from the GraniteSpeech stale device-override test failure.", "right": "issue:44112"}, {"accept": false, "left": "issue:43278", "reason": "These affect different layers of the stack: dtype drift between train/eval versus an undefined-variable bug in multimodal token counting for video.", "right": "issue:43329"}], "summary": "These items are mostly unrelated transformer bugs that only share broad surface-level themes like loading, dtype, quantization, or model-specific failures. 
None of the soft pairs looks like the same underlying bug/change, so no duplicate merges are justified."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:37428|issue:44987", "issue:43335|issue:44112", "issue:43653|issue:45083", "issue:43335|issue:45072", "issue:43873|issue:45229", "issue:43550|issue:44112", "issue:43873|issue:44493", "issue:34689|issue:35141", "issue:41720|issue:45229", "issue:43720|issue:44112", "issue:43278|issue:43329"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7144, "estimated_input_tokens": 3444, "item_count": 18, "node_count": 18, "serialized_chars": 13774, "soft_pair_count": 11}, "cached_at": "2026-04-14T16:11:46Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "b7a99fa21d5554ee4eb7cbf516a0ea7624e67726764cf1cbcfcd8db9b9c2d549", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:25251", "issue:29942", "issue:32090", "issue:33357", "issue:35141", "issue:42915", "issue:42994", "issue:43065", "issue:43994", "issue:44442", "issue:44448", "issue:44466", "issue:44734", "issue:44871", "issue:44977", "issue:45072", "issue:45406", "issue:45412"], "result": {"analyst_result": {"best_issue_reason": "No single issue is a good representative for the cluster; the reported problems are too different in component and failure mode.", "best_pr_reason": null, "canonical_issue_reason": "No clear canonical issue: the items are heterogeneous and do not describe the same underlying bug or change.", "canonical_pr_reason": null, "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "issue:43065", "reason": "Different bugs: one is about a dummy Conv2d in 
Sam3PixelDecoder, the other about bad outputs from SigLIP2 with AutoModel/pipeline.", "right": "issue:43994"}, {"accept": false, "left": "issue:29942", "reason": "Unrelated failures: Flash Attention 2 test breakage vs token embedding resizing reinitializing lm head output weights.", "right": "issue:35141"}, {"accept": false, "left": "issue:43065", "reason": "Different code paths and symptoms: dummy Conv2d construction vs RT-DETR memory not being released on deletion.", "right": "issue:45412"}, {"accept": false, "left": "issue:32090", "reason": "Not the same underlying problem: Trainer broadcast TypeError on NoneType vs MacOS bus error loading a CLIP community model.", "right": "issue:33357"}, {"accept": false, "left": "issue:44442", "reason": "Different subsystems and errors: tokenizer loading failure vs dtype mismatches in bfloat16 inference.", "right": "issue:45072"}, {"accept": false, "left": "issue:44448", "reason": "Both are regression-style reports, but one is about model output changes across versions and the other about weight serialization/tied weights; not the same bug.", "right": "issue:44466"}, {"accept": false, "left": "issue:43994", "reason": "No overlap in failure mode: SigLIP2 inference output corruption vs RT-DETR memory lifecycle leak.", "right": "issue:45412"}, {"accept": false, "left": "issue:42915", "reason": "Both involve quantization, but one is a FineGrainedFP8Config failure for Qwen3Moe and the other is quantized model saving; distinct issues.", "right": "issue:42994"}, {"accept": false, "left": "issue:44734", "reason": "Different serving issues: KV-cache continuation tensor indexing crash vs Gemma4Processor missing _tokenizer attribute.", "right": "issue:45406"}, {"accept": false, "left": "issue:25251", "reason": "Unrelated bugs: pipeline top_k output shape change vs trainer GPU broadcast rejecting NoneType.", "right": "issue:32090"}, {"accept": false, "left": "issue:44871", "reason": "Different model behaviors: Gemma-3 eos_token_id config 
inconsistency vs Qwen3.5 flash-attention generation failure.", "right": "issue:44977"}], "summary": "This cluster does not appear to contain any true duplicate pairs. The issues span unrelated regressions and bugs across tokenization, serving, quantization, model loading, serialization, and runtime/device-specific failures."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43065|issue:43994", "issue:29942|issue:35141", "issue:43065|issue:45412", "issue:32090|issue:33357", "issue:44442|issue:45072", "issue:44448|issue:44466", "issue:43994|issue:45412", "issue:42915|issue:42994", "issue:44734|issue:45406", "issue:25251|issue:32090", "issue:44871|issue:44977"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6988, "estimated_input_tokens": 3366, "item_count": 18, "node_count": 18, "serialized_chars": 13463, "soft_pair_count": 10}, "cached_at": "2026-04-14T16:12:29Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "1d3ec6d57f334ab2c104001b661070615f9d25607cf1411859ed0d88d8d58cac", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:29942", "issue:30064", "issue:33357", "issue:39692", "issue:43257", "issue:43278", "issue:43381", "issue:43653", "issue:43824", "issue:44206", "issue:44279", "issue:44442", "issue:44479", "issue:44610", "issue:44743", "issue:45103", "issue:45372", "issue:45412"], "result": {"analyst_result": {"best_issue_reason": "No issue is a clear cluster representative because the pairwise similarities are superficial rather than the same underlying defect.", "best_pr_reason": "No pull requests are present in this cluster.", "canonical_issue_reason": "No single 
canonical issue fits the cluster; the issues span unrelated tokenizer, vision, evaluation, import, and documentation regressions.", "canonical_pr_reason": "No pull requests are present in this cluster.", "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "issue:43653", "reason": "Both are tokenizer-related, but one is a BigBird special-token/decoding bug and the other is an AutoTokenizer loading failure for a different tokenizer class.", "right": "issue:44442"}, {"accept": false, "left": "issue:43257", "reason": "These concern different failures: Qwen3 MoE weight conversion with accelerate+deepspeed versus a training/eval dtype mismatch in an embedding layer.", "right": "issue:43278"}, {"accept": false, "left": "issue:43278", "reason": "One is about BF16-to-FP32 dtype drift, the other about gradient checkpointing in eval mode; different symptoms and code paths.", "right": "issue:43381"}, {"accept": false, "left": "issue:44206", "reason": "Unsupported processor args for one feature extractor vs a video-input regression across several Qwen variants are not the same bug.", "right": "issue:44479"}, {"accept": false, "left": "issue:45103", "reason": "Auto-docstring annotation handling and a mistral_common import failure are unrelated issues in different subsystems.", "right": "issue:45372"}, {"accept": false, "left": "issue:43653", "reason": "Tokenizer special-token registration and Qwen3.5 recurrent-state reset with cache are completely separate defects.", "right": "issue:44743"}, {"accept": false, "left": "issue:43824", "reason": "Both mention dependencies/imports, but one is a missing exported class and the other is a broad dependency issue without the same concrete failure.", "right": "issue:44279"}, {"accept": false, "left": "issue:44610", "reason": "Processor output-size mismatch and a model memory-leak-on-delete problem are different code-path bugs.", "right": "issue:45412"}, {"accept": false, "left": "issue:29942", "reason": "Flash Attention 
test failures and void segmentation map processing failures are unrelated model/preprocessing problems.", "right": "issue:30064"}, {"accept": false, "left": "issue:33357", "reason": "A MacOS bus error in a CLIP model is not the same as SigLIP2 documentation example errors and quantization mismatch.", "right": "issue:39692"}], "summary": "This cluster is heterogeneous: the soft pairs are topical lookalikes, but they describe different bugs and code paths, so none should be merged as duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43653|issue:44442", "issue:43257|issue:43278", "issue:43278|issue:43381", "issue:44206|issue:44479", "issue:45103|issue:45372", "issue:43653|issue:44743", "issue:43824|issue:44279", "issue:44610|issue:45412", "issue:29942|issue:30064", "issue:33357|issue:39692"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6796, "estimated_input_tokens": 3270, "item_count": 18, "node_count": 18, "serialized_chars": 13077, "soft_pair_count": 9}, "cached_at": "2026-04-14T16:13:04Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "eae1bd16215f9e461b436d23a2b44d383c3c59b13d698793b40aa84dfd80841e", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:29127", "issue:32090", "issue:33290", "issue:34567", "issue:43262", "issue:43334", "issue:43381", "issue:43504", "issue:43526", "issue:43531", "issue:43701", "issue:44186", "issue:44206", "issue:44387", "issue:44857", "issue:44977", "issue:45229", "issue:45245"], "result": {"analyst_result": {"best_issue_reason": "No single best issue exists here; each report describes a distinct bug in a different code path or 
component.", "best_pr_reason": null, "canonical_issue_reason": "No canonical issue: the items are not duplicates of one another, and the apparent similarities are only broad surface-level overlaps.", "canonical_pr_reason": null, "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "issue:29127", "reason": "Different components and failure modes: LayoutLMv3 box validation/error clarity vs TrainerState token-count tracking.", "right": "issue:34567"}, {"accept": false, "left": "issue:43334", "reason": "Both touch Qwen-related models, but one is a missing pad_token_id on load and the other is a sliding_window issue on Qwen3-MoE; different bugs.", "right": "issue:43531"}, {"accept": false, "left": "issue:32090", "reason": "Trainer broadcast TypeError with NoneType is unrelated to Adafactor + DeepSpeed OOM.", "right": "issue:33290"}, {"accept": false, "left": "issue:43504", "reason": "Different model/loss paths and symptoms: BEiT pretrained-loading legacy field vs LwDetrImageLoss crashing under float16 AMP.", "right": "issue:44857"}, {"accept": false, "left": "issue:44977", "reason": "Qwen3.5 flash-attention generation bug is unrelated to the 2^24 category-count limit error.", "right": "issue:45245"}, {"accept": false, "left": "issue:44387", "reason": "Both mention OOM, but one is a Transformers 5.x int4 quantization memory regression and the other is Gemma4 multi-GPU inference OOM; likely different causes.", "right": "issue:45229"}, {"accept": false, "left": "issue:43526", "reason": "BeitImageProcessorFast label reduction is unrelated to LayoutLMv2Tokenizer crashes on NER batching/padding.", "right": "issue:44186"}, {"accept": false, "left": "issue:43262", "reason": "Audio chat-template sample-rate default mismatch is unrelated to LasrFeatureExtractor passing an unsupported center arg.", "right": "issue:44206"}, {"accept": false, "left": "issue:43381", "reason": "Gradient checkpointing failing in eval mode is a different issue from a 
resume_from_checkpoint key mismatch.", "right": "issue:43701"}], "summary": "These issues are a heterogeneous grab-bag; none of the soft pairs look like the same underlying bug, so all soft edges should be rejected."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:29127|issue:34567", "issue:43334|issue:43531", "issue:32090|issue:33290", "issue:43504|issue:44857", "issue:44977|issue:45245", "issue:44387|issue:45229", "issue:43526|issue:44186", "issue:43262|issue:44206", "issue:43381|issue:43701"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6696, "estimated_input_tokens": 3220, "item_count": 17, "node_count": 17, "serialized_chars": 12878, "soft_pair_count": 10}, "cached_at": "2026-04-14T16:13:48Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "c580944cbdb5e5abadc3d50f090ccd0582a568ea8602168ff4af6dc0a4b9ca78", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:29127", "issue:29942", "issue:33357", "issue:35141", "issue:36010", "issue:42915", "issue:43232", "issue:43388", "issue:43526", "issue:43550", "issue:43606", "issue:43701", "issue:43931", "issue:43994", "issue:44442", "issue:44857", "issue:44898"], "result": {"analyst_result": {"best_issue_reason": "Issue 43701 is the best issue to keep as a representative example of a meaningful, actionable bug report in this mixed cluster, though it is not a duplicate of the others.", "best_pr_reason": null, "canonical_issue_reason": "Issue 43701 is the most representative standalone bug report in the set: it has the highest inbound references, solid discussion activity, and describes a concrete reproducible training/runtime failure 
rather than a narrow model-specific edge case.", "canonical_pr_reason": null, "confidence": 0.86, "soft_edge_verdicts": [{"accept": false, "left": "issue:44442", "reason": "Different code paths: tokenizer loading versus Perceiver image classification resizing. Shared failure wording is too broad to indicate the same bug.", "right": "issue:44898"}, {"accept": false, "left": "issue:43232", "reason": "One is generation kwargs handling after sync_gpus; the other is metrics gathering dropping tuple labels. These are distinct runtime paths and defects.", "right": "issue:43388"}, {"accept": false, "left": "issue:29942", "reason": "Flash Attention 2 test failures and a GenerationMixin import error are unrelated symptoms in different areas of the library.", "right": "issue:36010"}, {"accept": false, "left": "issue:29127", "reason": "LayoutLMv3 error messaging and a MacOS CLIP bus error are clearly different problems with no shared failing code path.", "right": "issue:33357"}, {"accept": false, "left": "issue:43550", "reason": "torch.compile/SDPA failure in Bamba-9B-v2 is unrelated to resume_from_checkpoint key mismatch; different subsystem and failure mode.", "right": "issue:43701"}, {"accept": false, "left": "issue:43526", "reason": "BeitImageProcessorFast label reduction and LwDetrImageLoss AMP crashes are separate bugs in different components.", "right": "issue:44857"}, {"accept": false, "left": "issue:43606", "reason": "CPU offload device mismatch for bark-small is a model/device placement bug, not the same as checkpoint key mismatch.", "right": "issue:43701"}, {"accept": false, "left": "issue:42915", "reason": "Both mention model loading issues, but one is FineGrainedFP8Config on Qwen3Moe and the other is a weight-shape mismatch on Qwen3-VL; the underlying problems differ.", "right": "issue:43931"}, {"accept": false, "left": "issue:29127", "reason": "Error-message clarity for insufficient boxes and token-embedding reinitialization are unrelated bugs.", "right": 
"issue:35141"}, {"accept": false, "left": "issue:43526", "reason": "reduce_labels returning a single label and nonsensical SigLIP2 outputs are not the same defect or code path.", "right": "issue:43994"}], "summary": "These issues are largely unrelated and span different subsystems (model loading, generation, image processors, training utilities, and hardware/runtime-specific failures). The soft pairs mostly show superficial topic overlap rather than the same underlying bug or change, so none should be merged as duplicates. If one issue must act as the cluster representative, issue 43701 is the strongest standalone candidate because it is well-scoped, broadly relevant to training flows, and has the highest engagement among the set."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44442|issue:44898", "issue:43232|issue:43388", "issue:29942|issue:36010", "issue:29127|issue:33357", "issue:43550|issue:43701", "issue:43526|issue:44857", "issue:43606|issue:43701", "issue:42915|issue:43931", "issue:29127|issue:35141", "issue:43526|issue:43994"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6534, "estimated_input_tokens": 3139, "item_count": 17, "node_count": 17, "serialized_chars": 12556, "soft_pair_count": 9}, "cached_at": "2026-04-14T16:14:30Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "025b7a912e9fdb98d275d961045fc8c7c48e06d4d472c7dba39111a9befcfd6d", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:43316", "issue:43525", "issue:43575", "issue:43646", "issue:43673", "issue:43701", "issue:43761", "issue:43931", "issue:43994", "issue:44077", "issue:44188", "issue:44206", 
"issue:44479", "issue:44743", "issue:44811", "issue:45216", "issue:45290"], "result": {"analyst_result": {"best_issue_reason": "issue:43646 is the most general and reusable as a representative issue, though it should not be treated as a true duplicate of the others.", "best_pr_reason": null, "canonical_issue_reason": "issue:43646 is the closest umbrella-style report because it describes a broad Transformers v5 initialization regression, but the rest of the set spans distinct bugs and model families.", "canonical_pr_reason": null, "confidence": 0.88, "soft_edge_verdicts": [{"accept": false, "left": "issue:43761", "reason": "Different subsystems and symptoms: CLIPVisionModel hidden_states regression vs attention-kernel divergence under torch.compile.", "right": "issue:44188"}, {"accept": false, "left": "issue:44479", "reason": "Different code paths: video-input regression in Qwen VL models vs apply_chat_template crash on assistant tool-call messages.", "right": "issue:45290"}, {"accept": false, "left": "issue:43316", "reason": "Both are config/API mismatches, but they concern different models and different missing attributes.", "right": "issue:43525"}, {"accept": false, "left": "issue:44743", "reason": "Unrelated bugs: Qwen3.5 recurrent-state reset with cache vs Whisper batch_decode ignoring skip_special_tokens.", "right": "issue:44811"}, {"accept": false, "left": "issue:43994", "reason": "Different failures in different components: nonsensical SigLIP2 outputs vs LasrFeatureExtractor crashing on an unsupported argument.", "right": "issue:44206"}, {"accept": false, "left": "issue:43931", "reason": "Both involve Qwen3-family models, but one is a load-time shape mismatch and the other is a save_pretrained checkpoint regression.", "right": "issue:45216"}, {"accept": false, "left": "issue:43673", "reason": "Both mention cache behavior, but the concrete bugs and affected paths differ: GenerationMixin chunked_prefill vs modular_qwen3_5 recurrent state handling.", "right": 
"issue:44743"}, {"accept": false, "left": "issue:43646", "reason": "The shared theme is initialization/post_init, but these are not the same concrete bug or fixable in one PR.", "right": "issue:44077"}, {"accept": false, "left": "issue:43575", "reason": "Completely different issues: tensor-parallel OOM while loading a model vs checkpoint key mismatch on resume.", "right": "issue:43701"}], "summary": "This cluster is mostly a set of unrelated Transformers regressions across different models and APIs; the soft-similarity links are superficial and should not be merged as duplicates. There are no PRs in the cluster."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43761|issue:44188", "issue:44479|issue:45290", "issue:43316|issue:43525", "issue:44743|issue:44811", "issue:43994|issue:44206", "issue:43931|issue:45216", "issue:43673|issue:44743", "issue:43646|issue:44077", "issue:43575|issue:43701"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6788, "estimated_input_tokens": 3266, "item_count": 18, "node_count": 18, "serialized_chars": 13063, "soft_pair_count": 9}, "cached_at": "2026-04-14T16:14:59Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "3ba4407af79c42a8130ffd1641e6b73e93d75caa869708867e6e5b1dcead0636", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:29942", "issue:33290", "issue:33666", "issue:34567", "issue:34689", "issue:39692", "issue:41628", "issue:43493", "issue:43575", "issue:43611", "issue:43653", "issue:43701", "issue:43827", "issue:44077", "issue:44206", "issue:44387", "issue:44479", "issue:45372"], "result": {"analyst_result": {"best_issue_reason": "Issue 
#45372 is the strongest standalone issue in the set: it has a concrete import-time failure, a clear affected version range, and a specific user-visible impact. It is still not a good cluster-wide canonical because the other items describe different bugs.", "best_pr_reason": null, "canonical_issue_reason": "No single issue cleanly represents the whole cluster because the items span multiple unrelated subsystems and failure modes; if one must be chosen, issue #45372 is the most detailed, actionable, and current report, but it is not a true canonical for the rest.", "canonical_pr_reason": null, "confidence": 0.94, "soft_edge_verdicts": [{"accept": false, "left": "issue:41628", "reason": "Both are import-related, but they concern different missing symbols and different failure paths: `AutoImageProcessor` import failure versus `ReasoningEffort`/Gemma processor loading.", "right": "issue:45372"}, {"accept": false, "left": "issue:43575", "reason": "Both mention OOM, but the contexts differ materially: tensor-parallel loading of a specific Qwen model versus increased CUDA reserved memory under int4 quantization.", "right": "issue:44387"}, {"accept": false, "left": "issue:29942", "reason": "Flash Attention 2 test failures and Qwen2-VL multi-GPU training are unrelated problems in different areas with no sign of the same bug or fix.", "right": "issue:33666"}, {"accept": false, "left": "issue:43653", "reason": "A tokenizer special-token registration bug is not the same as a resume-from-checkpoint key mismatch; different code paths and symptoms.", "right": "issue:43701"}, {"accept": false, "left": "issue:43493", "reason": "One is a SigLIP2 implementation discrepancy, the other is stale docs referencing removed pipeline APIs; these are not the same underlying issue.", "right": "issue:43827"}, {"accept": false, "left": "issue:33290", "reason": "An optimizer/deepspeed memory issue and a TrainerState counter not updating are unrelated.", "right": "issue:34567"}, {"accept": false, 
"left": "issue:34689", "reason": "Both affect model loading, but they are different regressions with different triggers: Llama 3.2 Vision loading in 4.46.2 versus `base_model_prefix` handling in 5.0.0.", "right": "issue:43611"}, {"accept": false, "left": "issue:44077", "reason": "`patchtsmixer` post-init validation and Qwen video-input regressions are separate feature areas with different failure modes.", "right": "issue:44479"}, {"accept": false, "left": "issue:39692", "reason": "SigLIP2 docs/model-processor mismatch and a `LasrFeatureExtractor` unsupported argument crash are not the same bug or change.", "right": "issue:44206"}], "summary": "This cluster is mostly a grab bag of unrelated closed/open issues across model loading, OOMs, tokenizer bugs, docs regressions, and trainer/state behavior. None of the soft pairs look like the same underlying bug or change, so all soft edges should be rejected."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:41628|issue:45372", "issue:43575|issue:44387", "issue:29942|issue:33666", "issue:43653|issue:43701", "issue:43493|issue:43827", "issue:33290|issue:34567", "issue:34689|issue:43611", "issue:44077|issue:44479", "issue:39692|issue:44206"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6684, "estimated_input_tokens": 3214, "item_count": 17, "node_count": 17, "serialized_chars": 12854, "soft_pair_count": 10}, "cached_at": "2026-04-14T16:15:25Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "f862aa75de1052fe20f3db4682fdc0e577a93a9094c18c7d637f721008ad5936", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:33290", "issue:34689", 
"issue:35141", "issue:42175", "issue:43316", "issue:43381", "issue:43540", "issue:43873", "issue:44162", "issue:44190", "issue:44496", "issue:44514", "issue:44792", "issue:44855", "issue:44871", "issue:44877", "issue:45137"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.98, "soft_edge_verdicts": [{"accept": false, "left": "issue:43381", "reason": "Different failure modes: gradient checkpointing eval-mode restriction vs quantization/offloading behavior.", "right": "issue:43873"}, {"accept": false, "left": "issue:44190", "reason": "Local dataset loading script failure is unrelated to an unrecognized model/config loading error.", "right": "issue:44496"}, {"accept": false, "left": "issue:43873", "reason": "Quantization offloading bug and Gemma-3 eos_token_id mismatch are separate config/runtime issues.", "right": "issue:44871"}, {"accept": false, "left": "issue:44162", "reason": "ESM2 regression and Janus image-generation test failure affect different models and code paths.", "right": "issue:44792"}, {"accept": false, "left": "issue:33290", "reason": "DeepSpeed AdaFactor OOM is unrelated to embedding resize/post_init reinitialization.", "right": "issue:35141"}, {"accept": false, "left": "issue:44855", "reason": "Python 3.13 import parsing error in DebertaV2 is unrelated to DeepSpeed ZeRO3 deque underflow.", "right": "issue:45137"}, {"accept": false, "left": "issue:43540", "reason": "Both are multimodal, but they target different models and distinct bugs in separate processing paths.", "right": "issue:44514"}, {"accept": false, "left": "issue:33290", "reason": "DeepSpeed optimizer memory issue is unrelated to Llama 3.2 Vision model loading regression.", "right": "issue:34689"}, {"accept": false, "left": "issue:43316", "reason": "Both concern config APIs, but they involve different models and different strictness/loading failures.", "right": "issue:44877"}, 
{"accept": false, "left": "issue:42175", "reason": "Packaging/backend installation issue is not the same as a Gemma3TextConfig API discrepancy.", "right": "issue:43316"}], "summary": "This cluster is not a duplicate set; the issues span unrelated problems across quantization, config loading, multimodal processing, training utilities, and model-specific regressions. All proposed soft edges should be rejected."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43381|issue:43873", "issue:44190|issue:44496", "issue:43873|issue:44871", "issue:44162|issue:44792", "issue:33290|issue:35141", "issue:44855|issue:45137", "issue:43540|issue:44514", "issue:33290|issue:34689", "issue:43316|issue:44877", "issue:42175|issue:43316"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6786, "estimated_input_tokens": 3265, "item_count": 18, "node_count": 18, "serialized_chars": 13057, "soft_pair_count": 9}, "cached_at": "2026-04-14T16:16:15Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "7c05b094eedfd13cf320a2bd2b0ab9cfcdbf4fcd1686204dd5b3380cd8417b49", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:33290", "issue:34689", "issue:36010", "issue:36331", "issue:41950", "issue:43316", "issue:43381", "issue:43388", "issue:43404", "issue:43441", "issue:43475", "issue:43643", "issue:43688", "issue:43716", "issue:43749", "issue:43856", "issue:43931", "issue:44913"], "result": {"analyst_result": {"best_issue_reason": "If one must be picked, issue:43749 is the strongest representative by discussion/inbound references and is a substantive core loading regression.", "best_pr_reason": null, 
"canonical_issue_reason": "No single canonical issue: the cluster is a mix of unrelated bugs (loading, training, config, multimodal pipeline, MoE).", "canonical_pr_reason": null, "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "issue:43749", "reason": "Different failure modes: broken FSDP CPU-efficient loading vs Qwen3-VL weight-shape mismatch during model load.", "right": "issue:43931"}, {"accept": false, "left": "issue:43643", "reason": "Separate config bugs: missing fields with trust_remote_code vs rotary_pct not persisting on reload.", "right": "issue:44913"}, {"accept": false, "left": "issue:43388", "reason": "Training/eval metric aggregation bug vs auxiliary-loss normalization bug; unrelated code paths.", "right": "issue:43688"}, {"accept": false, "left": "issue:43381", "reason": "Gradient checkpointing eval-mode restriction is distinct from MoE memory inefficiency.", "right": "issue:43856"}, {"accept": false, "left": "issue:33290", "reason": "OOM with Adafactor/DeepSpeed is unrelated to CustomTrainer compute_loss kwarg incompatibility.", "right": "issue:36331"}, {"accept": false, "left": "issue:43316", "reason": "API discrepancy in Gemma3TextConfig is unrelated to Mistral-3 image preprocessor/model dtype mismatch.", "right": "issue:43716"}, {"accept": false, "left": "issue:43404", "reason": "Weight tying bug in Mistral3ForConditionalGeneration is a different issue than FlashAttention failure in Ministral-3.", "right": "issue:43441"}, {"accept": false, "left": "issue:34689", "reason": "Model-loading regression for Llama 3.2 Vision is not the same as the GenerationMixin import error.", "right": "issue:36010"}, {"accept": false, "left": "issue:41950", "reason": "Video-classification pipeline using image processors is unrelated to SAM 3 Video missing fpn_position_embeddings.", "right": "issue:43475"}], "summary": "These are not duplicates: each soft pair points to a different subsystem and symptom, so all proposed merges should be 
rejected."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43749|issue:43931", "issue:43643|issue:44913", "issue:43388|issue:43688", "issue:43381|issue:43856", "issue:33290|issue:36331", "issue:43316|issue:43716", "issue:43404|issue:43441", "issue:34689|issue:36010", "issue:41950|issue:43475"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7000, "estimated_input_tokens": 3372, "item_count": 18, "node_count": 18, "serialized_chars": 13486, "soft_pair_count": 10}, "cached_at": "2026-04-14T16:16:55Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "d36d34e68c0d9dae534c4ecb2780070684441d7dcf638ea074fb1c3020feff49", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:16998", "issue:22355", "issue:25251", "issue:28282", "issue:43065", "issue:43295", "issue:43452", "issue:43761", "issue:43825", "issue:44077", "issue:44242", "issue:44479", "issue:44496", "issue:44534", "issue:44617", "issue:44871", "issue:45127", "issue:45200"], "result": {"analyst_result": {"best_issue_reason": "No issue is a good global representative because the cluster spans multiple unrelated problem areas and model families.", "best_pr_reason": null, "canonical_issue_reason": "No single canonical issue: the items do not describe the same underlying bug, so there is no clear representative issue for the cluster.", "canonical_pr_reason": null, "confidence": 0.98, "soft_edge_verdicts": [{"accept": false, "left": "issue:45127", "reason": "Both are Gemma-related, but one is about LoRA merge behavior with extended vocab and tied embeddings, while the other is about mm_token_type_ids defaulting for text-only 
fine-tuning. Different bugs.", "right": "issue:45200"}, {"accept": false, "left": "issue:43065", "reason": "Different problems: a dummy Conv2d in Sam3PixelDecoder vs CUDA OOM in Sam3Video. Same family, not the same defect.", "right": "issue:44617"}, {"accept": false, "left": "issue:43761", "reason": "CLIPVisionModel hidden_states regression and Qwen video-input regression are unrelated code paths and symptoms.", "right": "issue:44479"}, {"accept": false, "left": "issue:16998", "reason": "Model_max_length question for DeBERTa-V3 is unrelated to the missing transformers.onnx import error.", "right": "issue:22355"}, {"accept": false, "left": "issue:25251", "reason": "Top-k output nesting in pipeline and AutoModel PyTorch-missing ImportError are different issues.", "right": "issue:28282"}, {"accept": false, "left": "issue:43295", "reason": "One is a processor/tokenizer regression; the other is an inaccurate pipeline error message about translation support. Not the same bug.", "right": "issue:43825"}, {"accept": false, "left": "issue:43295", "reason": "Processor/tokenizer access regression and Gemma eos_token_id inconsistency are separate issues.", "right": "issue:44871"}, {"accept": false, "left": "issue:43452", "reason": "gguf_file handling for tokenizer/model loading is distinct from an unrecognized model_type in config.json.", "right": "issue:44496"}, {"accept": false, "left": "issue:44077", "reason": "PatchTSMixer post_init allowance and non-persistent buffer junk are unrelated implementation bugs.", "right": "issue:44534"}, {"accept": false, "left": "issue:43761", "reason": "CLIPVisionModel hidden_states regression and missing load-balancing loss when output_router_logits=False are different model behaviors in different subsystems.", "right": "issue:44242"}], "summary": "This cluster is heterogeneous rather than duplicate-heavy: the issues cover unrelated bugs in model loading, pipeline messaging, SAM/CLIP/Qwen regressions, and training/config behavior. 
All soft pairs should be rejected."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:45127|issue:45200", "issue:43065|issue:44617", "issue:43761|issue:44479", "issue:16998|issue:22355", "issue:25251|issue:28282", "issue:43295|issue:43825", "issue:43295|issue:44871", "issue:43452|issue:44496", "issue:44077|issue:44534", "issue:43761|issue:44242"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6464, "estimated_input_tokens": 3104, "item_count": 17, "node_count": 17, "serialized_chars": 12413, "soft_pair_count": 9}, "cached_at": "2026-04-14T16:17:36Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "762d1bffc4b061203ea226a4a6b3c967422e30634a7aee62afec2be284145cb7", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:42617", "issue:43065", "issue:43575", "issue:43646", "issue:43742", "issue:43906", "issue:43931", "issue:44060", "issue:44479", "issue:44521", "issue:44610", "issue:44734", "issue:44913", "issue:44933", "issue:45290", "issue:45362", "issue:45381"], "result": {"analyst_result": {"best_issue_reason": "No issue is a strong canonical for the whole set because the topics are heterogeneous; if forced, issue:44521 is the most active/open and broadly relevant, but it is not a duplicate of the others.", "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.92, "soft_edge_verdicts": [{"accept": false, "left": "issue:43646", "reason": "Both concern config/init behavior, but one is a broad Transformers 5.0 custom-initialization regression and the other is GPTNeoX rotary_pct persistence on reload; different models, code paths, and 
failure modes.", "right": "issue:44913"}, {"accept": false, "left": "issue:43906", "reason": "Both involve chat/template-related repros, but 43906 is an isolated reproduction of a linked upstream issue while 45290 is a crash on assistant messages with tool calls and no content; not the same bug.", "right": "issue:45290"}, {"accept": false, "left": "issue:43931", "reason": "These are different Qwen3-family loading/tie-weight issues: one is a weight-shape mismatch on model load, the other is an incorrect tied-weights warning involving embed_tokens and linear_attn.dt_bias.", "right": "issue:44060"}, {"accept": false, "left": "issue:44521", "reason": "Both are multimodal/video-related, but 44521 is about all-zero assistant_masks from apply_chat_template, while 45381 is about wrong vision_position_ids for qwen2.5-vl video input; distinct symptoms and fixes.", "right": "issue:45381"}, {"accept": false, "left": "issue:44933", "reason": "A missing import from image_utils is unrelated to Qwen3.5 chat crashes; they do not share the same concrete code path or underlying defect.", "right": "issue:45362"}, {"accept": false, "left": "issue:44479", "reason": "One is a video input regression affecting several Qwen VL models, the other is a serve KV-cache continuation crash from wrong tensor indexing; same broad area, but different bugs.", "right": "issue:44734"}, {"accept": false, "left": "issue:42617", "reason": "Running 3d_parallel.py and OOM when loading Qwen2-57B-A14B with tensor parallelism are different failures with different causes.", "right": "issue:43575"}, {"accept": false, "left": "issue:43575", "reason": "These are both model-loading problems, but one is OOM with a large Qwen2 model under tensor parallelism and the other is a key error loading MobileLLM-125M; not the same issue.", "right": "issue:43742"}, {"accept": false, "left": "issue:43065", "reason": "A dummy Conv2d in Sam3PixelDecoder is unrelated to OmDet-Turbo processor producing the wrong input size; 
different models and different bugs.", "right": "issue:44610"}], "summary": "This set is not a single duplicate cluster: it spans unrelated bugs in model loading, config reloads, chat templating, multimodal/video preprocessing, serving, and miscellaneous model-specific failures. None of the soft pairs look like the same underlying bug or change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43646|issue:44913", "issue:43906|issue:45290", "issue:43931|issue:44060", "issue:44521|issue:45381", "issue:44933|issue:45362", "issue:44479|issue:44734", "issue:42617|issue:43575", "issue:43575|issue:43742", "issue:43065|issue:44610"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6520, "estimated_input_tokens": 3132, "item_count": 17, "node_count": 17, "serialized_chars": 12527, "soft_pair_count": 9}, "cached_at": "2026-04-14T16:18:06Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "598db494ba9a940ec02b6ef3deb7c2edcb69c129e3925b3385a57c8a7e5003c0", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:32090", "issue:35141", "issue:41720", "issue:43388", "issue:43575", "issue:43582", "issue:43650", "issue:43827", "issue:43994", "issue:44220", "issue:44464", "issue:44534", "issue:44792", "issue:44871", "issue:44913", "issue:44977", "issue:45245"], "result": {"analyst_result": {"best_issue_reason": "No single issue is a good canonical representative because the cluster is heterogeneous; if forced, issue 41720 is among the more concrete runtime bug reports, but it is not a duplicate of the others.", "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, 
"confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "issue:44792", "reason": "Different failure modes and models: janus image-generation test failure vs Qwen3.5 flash-attention generation behavior.", "right": "issue:44977"}, {"accept": false, "left": "issue:41720", "reason": "Unrelated bugs: Qwen3 auto device mapping cuda assert vs a category-cardinality RuntimeError.", "right": "issue:45245"}, {"accept": false, "left": "issue:44464", "reason": "Different underlying problems: compiled chunked generation inconsistency vs Gemma-3 eos_token_id config mismatch.", "right": "issue:44871"}, {"accept": false, "left": "issue:41720", "reason": "Both mention Qwen and hardware/resource issues, but one is cudaErrorAssert during auto device mapping and the other is OOM during tensor-parallel loading; not the same bug.", "right": "issue:43575"}, {"accept": false, "left": "issue:32090", "reason": "Trainer NoneType GPU broadcast error is unrelated to embedding resize/post_init reinitialization with untied word embeddings.", "right": "issue:35141"}, {"accept": false, "left": "issue:43827", "reason": "One is a docs cleanup after pipeline removals; the other is a model-specific incorrect output report. 
Same API surface, different underlying issue.", "right": "issue:43994"}, {"accept": false, "left": "issue:44534", "reason": "Non-persistent buffer initialization bug is not the same as GPTNeoXConfig rotary_pct not persisting on reload.", "right": "issue:44913"}, {"accept": false, "left": "issue:43388", "reason": "Different subsystems and symptoms: gather_for_metrics truncation vs Apple Silicon allocator warmup TypeError.", "right": "issue:43582"}, {"accept": false, "left": "issue:43650", "reason": "These are too vague and appear unrelated: a generic request vs an _torch_extract_fbank_features() bug.", "right": "issue:44220"}], "summary": "This cluster is not a true duplicate set: the items span many unrelated bugs (generation, config reload, docs, allocator, OOM, trainer, etc.). All soft pairs look like false positives based on different models, different code paths, or one docs issue vs one runtime bug."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44792|issue:44977", "issue:41720|issue:45245", "issue:44464|issue:44871", "issue:41720|issue:43575", "issue:32090|issue:35141", "issue:43827|issue:43994", "issue:44534|issue:44913", "issue:43388|issue:43582", "issue:43650|issue:44220"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7086, "estimated_input_tokens": 3415, "item_count": 18, "node_count": 18, "serialized_chars": 13657, "soft_pair_count": 10}, "cached_at": "2026-04-14T16:18:40Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "a879e815080c9f8fe0ff48d929eb5cc381fd5e6ed511ca48a7521916922d3c39", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:43335", "issue:43540", 
"issue:43644", "issue:43761", "issue:43976", "issue:44077", "issue:44079", "issue:44190", "issue:44355", "issue:44496", "issue:44734", "issue:44743", "issue:44855", "issue:44913", "issue:44964", "issue:45127", "issue:45357", "issue:45372"], "result": {"analyst_result": {"best_issue_reason": "No global best issue for deduping purposes; these should remain separate reports rather than being merged into one canonical thread.", "best_pr_reason": null, "canonical_issue_reason": "No single canonical issue stands out because the cluster is not a duplicate set; the items span unrelated subsystems and failure modes.", "canonical_pr_reason": null, "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "issue:44190", "reason": "Different problems: local dataset loading in a training script vs model loading failure for a specific multimodal checkpoint.", "right": "issue:44964"}, {"accept": false, "left": "issue:45127", "reason": "Different code paths: LoRA merge with extended vocab/tied embeddings vs saving incorrect visual encoder keys for Qwen3.5.", "right": "issue:45357"}, {"accept": false, "left": "issue:43761", "reason": "Different models and regressions: CLIPVisionModel hidden_states output vs Qwen3.5 recurrent state reset with cache.", "right": "issue:44743"}, {"accept": false, "left": "issue:44355", "reason": "One is about running compiled Python files; the other is a Python 3.13 TorchScript parsing/IndentationError issue.", "right": "issue:44855"}, {"accept": false, "left": "issue:43335", "reason": "SwitchTransformers sparse-layer config bug is unrelated to the DebertaV2 TorchScript parse failure.", "right": "issue:44855"}, {"accept": false, "left": "issue:43976", "reason": "These are separate Python compatibility regressions with different root causes and failure surfaces.", "right": "issue:44855"}, {"accept": false, "left": "issue:44496", "reason": "Unrecognized model/config.json loading is unrelated to the mistral_common ReasoningEffort import 
error.", "right": "issue:45372"}, {"accept": false, "left": "issue:43540", "reason": "Qwen3OmniMoe video-input ValueError and transformers serve KV-cache indexing crash are different bugs.", "right": "issue:44734"}, {"accept": false, "left": "issue:44077", "reason": "PatchTSMixer post_init allowance and ModelOutput key assignment are separate behaviors in different code paths.", "right": "issue:44079"}, {"accept": false, "left": "issue:43644", "reason": "Non-persistent buffer initialization junk and GPTNeoX rotary_pct reload defaulting are unrelated state/serialization bugs.", "right": "issue:44913"}], "summary": "This cluster is heterogeneous: the issues touch different models, serving paths, config serialization, Python-version compatibility, and unrelated regressions. None of the suggested soft pairs look like the same underlying bug or change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44190|issue:44964", "issue:45127|issue:45357", "issue:43761|issue:44743", "issue:44355|issue:44855", "issue:43335|issue:44855", "issue:43976|issue:44855", "issue:44496|issue:45372", "issue:43540|issue:44734", "issue:44077|issue:44079", "issue:43644|issue:44913"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6766, "estimated_input_tokens": 3255, "item_count": 17, "node_count": 17, "serialized_chars": 13020, "soft_pair_count": 10}, "cached_at": "2026-04-14T16:19:10Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "ef80653df4768d5f6ce11f479ab335a1e45c1704420a654f34d683b6df8c06db", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:36246", "issue:38175", "issue:38617", "issue:39692", 
"issue:41628", "issue:41950", "issue:42175", "issue:43065", "issue:43116", "issue:43295", "issue:43335", "issue:43404", "issue:44016", "issue:44190", "issue:44279", "issue:45216", "issue:45276"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.93, "soft_edge_verdicts": [{"accept": false, "left": "issue:43116", "reason": "Both mention classification scripts, but one is about empty multi-label outputs in run_classification.py and the other is about loading a local dataset in run_image_classification_no_trainer.py; different scripts and failure modes.", "right": "issue:44190"}, {"accept": false, "left": "issue:36246", "reason": "Both are import errors, but they concern different symbols in different modules: Qwen2_5_VLImageProcessor vs AutoImageProcessor. No sign of the same broken export or code path.", "right": "issue:41628"}, {"accept": false, "left": "issue:41950", "reason": "Video-classification pipeline using image processors is a runtime pipeline bug; the other is a notebook syntax error. Different artifact types and unrelated fixes.", "right": "issue:44016"}, {"accept": false, "left": "issue:43404", "reason": "One is a tied-weights bug in Mistral3ForConditionalGeneration; the other is a save_pretrained regression for Qwen3.5 checkpoints. Different models and different code paths.", "right": "issue:45216"}, {"accept": false, "left": "issue:43295", "reason": "Both involve model resizing/processor behavior, but one is a regression around processor.tokenizer and passing images to tokenizer, while the other is gemma4 embedding resizing not updating per-layer/output embeddings. Not the same bug.", "right": "issue:45276"}, {"accept": false, "left": "issue:36246", "reason": "Both are ImportErrors, but they involve unrelated missing names from different modules. 
No shared underlying API break is evident.", "right": "issue:38617"}, {"accept": false, "left": "issue:39692", "reason": "SigLIP2 example errors are about model/processor mismatch and quantization in a docs example; the other issue is about local dataset loading in an image classification training script. Different repro and fix.", "right": "issue:44190"}, {"accept": false, "left": "issue:43335", "reason": "SwitchTransformers sparse-layer config bug and dataset-loading failure in a training script are unrelated.", "right": "issue:44190"}, {"accept": false, "left": "issue:42175", "reason": "TensorFlow missing from a torch-only install is a dependency/package composition issue, while the other is a vague general transformers dependency problem. Too unspecific and not the same concrete bug.", "right": "issue:44279"}, {"accept": false, "left": "issue:38175", "reason": "siglip2 zero probabilities and Sam3PixelDecoder dummy Conv2d are different model behavior issues with no shared code path.", "right": "issue:43065"}], "summary": "These are mostly unrelated issues spanning imports, model behavior, docs/examples, pipelines, and config regressions. 
The soft pairs are only superficially similar by shared keywords and should not be merged as duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43116|issue:44190", "issue:36246|issue:41628", "issue:41950|issue:44016", "issue:43404|issue:45216", "issue:43295|issue:45276", "issue:36246|issue:38617", "issue:39692|issue:44190", "issue:43335|issue:44190", "issue:42175|issue:44279", "issue:38175|issue:43065"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6514, "estimated_input_tokens": 3129, "item_count": 17, "node_count": 17, "serialized_chars": 12514, "soft_pair_count": 9}, "cached_at": "2026-04-14T16:19:48Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "2ed5bbbd63887e2deb54af97889670f015f09166ae7479f6b5ccd1dfdc3ad53d", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:42907", "issue:43441", "issue:43479", "issue:43854", "issue:44016", "issue:44112", "issue:44186", "issue:44220", "issue:44246", "issue:44466", "issue:44734", "issue:44792", "issue:44871", "issue:44913", "issue:45003", "issue:45245", "issue:45375"], "result": {"analyst_result": {"best_issue_reason": "No global best issue stands out because the cluster does not share one underlying problem or code path.", "best_pr_reason": null, "canonical_issue_reason": "No single canonical issue fits this set; the items are not duplicates of one another.", "canonical_pr_reason": null, "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "issue:44871 \u2014 [Gemma-3] Inconsistent eos_token_id configuration", "reason": "Different subsystems and failure modes: tokenizer/config mismatch vs categorical 
embedding limit.", "right": "issue:45245 \u2014 RuntimeError: number of categories cannot exceed 2^24"}, {"accept": false, "left": "issue:44186 \u2014 LayoutLMv2Tokenizer crashes on NER inputs and batched padding/truncation", "reason": "Unrelated code paths: tokenizer batch handling vs server KV-cache indexing.", "right": "issue:44734 \u2014 transformers serve: /v1/responses crashes on KV cache continuation"}, {"accept": false, "left": "issue:43479 \u2014 Phi4MultimodalConfig incorrectly initializes default vision/audio configs when passed as None", "reason": "Both involve configs, but the bugs are different: default construction from None vs a missing strict field.", "right": "issue:45375 \u2014 Qwen3_5MoeVisionConfig missing deepstack_visual_indexes field"}, {"accept": false, "left": "issue:44016 \u2014 Syntax error in Transformer section 3 notebook", "reason": "Notebook syntax error and audio feature extraction are unrelated.", "right": "issue:44220 \u2014 Issue with _torch_extract_fbank_features()"}, {"accept": false, "left": "issue:44246 \u2014 import transformers takes long sometimes", "reason": "One is an import-performance complaint; the other is an unsafe import-time access bug.", "right": "issue:45003 \u2014 modeling_utils unsafely accesses sys.modules[]"}, {"accept": false, "left": "issue:44466 \u2014 Inconsistent serialization of lm_head.weight depending on model device", "reason": "Both touch save/load behavior, but they affect different objects and mechanisms.", "right": "issue:44913 \u2014 GPTNeoXConfig rotary_pct silently reverts to default on reload"}, {"accept": false, "left": "issue:44112 \u2014 Stale device override test in GraniteSpeech fails on CI", "reason": "These are different failing tests for different models with no shared concrete bug.", "right": "issue:44792 \u2014 Failed test case test_model_generate_images for janus model"}, {"accept": false, "left": "issue:42907 \u2014 Failing to Save Dequantized Ministrals/Devstrals", "reason": 
"Same model family, but the failures are distinct: dequantized saving vs FlashAttention integration.", "right": "issue:43441 \u2014 Ministral-3 fails with FlashAttention in Transformers v5 RC"}, {"accept": false, "left": "issue:43854 \u2014 Unable to load zai-org/GLM-4.7-Flash model correctly in the unit tests", "reason": "Different models and different failure surfaces: model loading vs image generation test behavior.", "right": "issue:44792 \u2014 Failed test case test_model_generate_images for janus model"}], "summary": "This cluster is heterogeneous: it spans unrelated model-config, tokenizer, serving, notebook, and CI/test failures. None of the soft pairs look like the same underlying bug or change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44871|issue:45245", "issue:44186|issue:44734", "issue:43479|issue:45375", "issue:44016|issue:44220", "issue:44246|issue:45003", "issue:44466|issue:44913", "issue:44112|issue:44792", "issue:42907|issue:43441", "issue:43854|issue:44792"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6996, "estimated_input_tokens": 3370, "item_count": 18, "node_count": 18, "serialized_chars": 13478, "soft_pair_count": 10}, "cached_at": "2026-04-14T16:20:20Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "5a6905d126f672d14d5668d5f30a1217a47f6f51a15d05cb0ee5d8d619a401ac", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:43295", "issue:43299", "issue:43643", "issue:43673", "issue:43824", "issue:43976", "issue:44016", "issue:44062", "issue:44112", "issue:44279", "issue:44336", "issue:44496", "issue:44933", "issue:44964", "issue:44987", 
"issue:45216", "issue:45335", "issue:45357"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.95, "soft_edge_verdicts": [{"accept": false, "left": "issue:43673", "reason": "One is a generation cache regression during chunked prefill; the other is ANSI formatting leaking in a loading report. Different code paths and symptoms.", "right": "issue:44336"}, {"accept": false, "left": "issue:43299", "reason": "Both involve Qwen models, but one is a loading failure for Qwen3VL MoE and the other is a save_pretrained regression for Qwen3.5 checkpoints. Different operations and bugs.", "right": "issue:45216"}, {"accept": false, "left": "issue:43295", "reason": "Custom processor/tokenizer image handling vs t5gemma embedding resizing. No shared failing code path.", "right": "issue:45335"}, {"accept": false, "left": "issue:44016", "reason": "Notebook syntax error and AddedToken keyword collision are unrelated issues.", "right": "issue:44062"}, {"accept": false, "left": "issue:43643", "reason": "Both are config/model-loading failures, but one is missing fields with trust_remote_code and the other is an unrecognized model_type. Different root causes.", "right": "issue:44496"}, {"accept": false, "left": "issue:43824", "reason": "ImportError for a specific Qwen2.5 VL class vs a nonexistent image_utils import. Different modules and fixes.", "right": "issue:44933"}, {"accept": false, "left": "issue:44112", "reason": "CI test flake in GraniteSpeech vs model loading failure for Phi-4 multimodal. Not the same bug.", "right": "issue:44964"}, {"accept": false, "left": "issue:43976", "reason": "Python version compatibility problem vs failure loading a specific model. Too broad to be duplicates.", "right": "issue:44987"}, {"accept": false, "left": "issue:43976", "reason": "General dependency/version complaint vs a Python-version compatibility report. 
Not the same underlying defect.", "right": "issue:44279"}, {"accept": false, "left": "issue:43299", "reason": "Both mention Qwen3.5/Qwen models, but one is model loading and the other is incorrect visual-encoder keys saved by save_pretrained. Different code paths.", "right": "issue:45357"}], "summary": "This cluster is heterogeneous: the issues span unrelated regressions in model loading, saving, config handling, docs, CI, and utility behavior. The soft-similarity pairs are mostly broad Transformer-version or Qwen mentions rather than the same underlying bug, so none should be merged as duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43673|issue:44336", "issue:43299|issue:45216", "issue:43295|issue:45335", "issue:44016|issue:44062", "issue:43643|issue:44496", "issue:43824|issue:44933", "issue:44112|issue:44964", "issue:43976|issue:44987", "issue:43976|issue:44279", "issue:43299|issue:45357"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7238, "estimated_input_tokens": 3491, "item_count": 18, "node_count": 18, "serialized_chars": 13962, "soft_pair_count": 11}, "cached_at": "2026-04-14T16:21:08Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "db8c8148ff59f6e2a35f4d740ad2780a9108fe232da3098abed9840ce35dd82c", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:43526", "issue:43577", "issue:43901", "issue:44075", "issue:44242", "issue:44297", "issue:44351", "issue:44368", "issue:44462", "issue:44496", "issue:44521", "issue:44779", "issue:45042", "issue:45081", "issue:45200", "issue:45216", "issue:45278", "issue:45325"], "result": {"analyst_result": 
{"best_issue_reason": "44462 is the best global issue candidate because it is user-facing, specific enough to be actionable, and sits closest to the small group of tokenizer-related reports in this otherwise heterogeneous cluster.", "best_pr_reason": null, "canonical_issue_reason": "44462 is the closest representative because it is a broad, concrete tokenizer-loading regression; the rest are mostly separate, unrelated failures rather than the same bug.", "canonical_pr_reason": null, "confidence": 0.53, "soft_edge_verdicts": [{"accept": false, "left": "issue:44075", "reason": "Different bugs: unused SGD optimizer args vs a Qwen3.5 tied-embeddings warning during LoRA fine-tuning.", "right": "issue:44368"}, {"accept": false, "left": "issue:43526", "reason": "Different areas: Beit image processor label reduction bug vs TextClassificationPipeline docs/behavior mismatch.", "right": "issue:43901"}, {"accept": false, "left": "issue:44496", "reason": "Different subsystems: unrecognized model/config loading vs PIL image processors incorrectly requiring torchvision.", "right": "issue:45042"}, {"accept": false, "left": "issue:44462", "reason": "Both tokenizer-related, but not the same concrete failure: ignoring tokenizer.json from a repo vs a Mistral regex patch crash.", "right": "issue:45081"}, {"accept": false, "left": "issue:44521", "reason": "Both touch multimodal logic, but they concern different code paths and outputs: assistant masks vs RoPE position IDs.", "right": "issue:45325"}, {"accept": false, "left": "issue:44242", "reason": "Different model logic: missing load-balancing loss when router logits are off vs mm_token_type_ids defaulting for text-only fine-tuning.", "right": "issue:45200"}, {"accept": false, "left": "issue:43901", "reason": "No overlap in bug type: docs/behavior mismatch for text classification vs model-type/config loading failure.", "right": "issue:44496"}, {"accept": false, "left": "issue:43577", "reason": "Different regressions: dtype not 
propagating for BLIP2 vs incorrect Qwen3.5 checkpoint saving.", "right": "issue:45216"}, {"accept": false, "left": "issue:44351", "reason": "Related only at a very high level; one is a specific missing import, the other is a broad post-upgrade import-error report.", "right": "issue:45278"}, {"accept": false, "left": "issue:44297", "reason": "Both involve tokenizer persistence/loading, but one is wrong tokenizer_class in saved config and the other ignores tokenizer.json at load time.", "right": "issue:44462"}, {"accept": false, "left": "issue:44462", "reason": "Different tokenizer regressions: repository tokenizer.json loading vs Deepseek tokenizer output becoming incorrect in v5.", "right": "issue:44779"}], "summary": "This cluster is loosely related by transformers loading/runtime regressions, but the items mostly describe distinct bugs across tokenizers, multimodal handling, imports, and image processors. No strong duplicate set emerges; 44462 is only the broadest concrete issue to anchor the cluster."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44075|issue:44368", "issue:43526|issue:43901", "issue:44496|issue:45042", "issue:44462|issue:45081", "issue:44521|issue:45325", "issue:44242|issue:45200", "issue:43901|issue:44496", "issue:43577|issue:45216", "issue:44351|issue:45278", "issue:44297|issue:44462", "issue:44462|issue:44779"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6890, "estimated_input_tokens": 3317, "item_count": 18, "node_count": 18, "serialized_chars": 13266, "soft_pair_count": 10}, "cached_at": "2026-04-14T16:21:49Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": 
"5bcd9fbeafdffe0cd5e05d9dddea55ad86d552d972b11cdc42565c620a427be8", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:38175", "issue:41950", "issue:43295", "issue:43479", "issue:43525", "issue:43650", "issue:43854", "issue:43976", "issue:44279", "issue:44479", "issue:44496", "issue:44617", "issue:44779", "issue:44877", "issue:44964", "issue:45020", "issue:45245", "issue:45405"], "result": {"analyst_result": {"best_issue_reason": "issue:45020 best represents the shared theme of version-induced model loading breakage without being tied to one specific model or error message.", "best_pr_reason": null, "canonical_issue_reason": "issue:45020 is the broadest umbrella-style issue in the set, covering recent transformers regressions that break model loading/remote_code flows; the others are narrower, model-specific failures.", "canonical_pr_reason": null, "confidence": 0.84, "soft_edge_verdicts": [{"accept": false, "left": "issue:43854", "reason": "Both are model-loading failures, but they involve different models and different reported breakages; too little evidence of one underlying bug.", "right": "issue:44964"}, {"accept": false, "left": "issue:44279", "reason": "One is a general dependency complaint and the other is a specific unreleased PEFT version bump; not the same issue.", "right": "issue:45405"}, {"accept": false, "left": "issue:43976", "reason": "Python version compatibility and missing PyPI release for MIN_PEFT_VERSION are unrelated problems.", "right": "issue:45405"}, {"accept": false, "left": "issue:44479", "reason": "A video-input regression for Qwen models is not the same as the categorical-cardinality runtime error.", "right": "issue:45245"}, {"accept": false, "left": "issue:41950", "reason": "The video-classification processor bug and the vague 'ADD THE DATA' issue do not describe the same code-path failure.", "right": "issue:43650"}, {"accept": false, "left": "issue:41950", "reason": "Both touch model/processor loading broadly, but one is 
a specific video-classification processor lookup bug and the other is a wide remote_code regression.", "right": "issue:45020"}, {"accept": false, "left": "issue:43479", "reason": "Both are multimodal configuration issues, but they fail in different places and do not appear to be the same defect.", "right": "issue:43525"}, {"accept": false, "left": "issue:44496", "reason": "Both concern config/model loading, but one is an unrecognized model_type and the other is strict config handling for granite_speech; not the same bug.", "right": "issue:44877"}, {"accept": false, "left": "issue:38175", "reason": "Unexpected zero probabilities in a SigLIP model and CUDA OOM in Sam3Video are clearly unrelated.", "right": "issue:44617"}, {"accept": false, "left": "issue:43295", "reason": "Both mention tokenizer regressions, but they affect different models and symptoms; too speculative to merge as duplicates.", "right": "issue:44779"}], "summary": "The cluster is mostly a loose collection of unrelated model-loading, config, tokenizer, and dependency-regression reports. 
None of the soft pairs look like the same concrete bug, so all should stay separate."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43854|issue:44964", "issue:44279|issue:45405", "issue:43976|issue:45405", "issue:44479|issue:45245", "issue:41950|issue:43650", "issue:41950|issue:45020", "issue:43479|issue:43525", "issue:44496|issue:44877", "issue:38175|issue:44617", "issue:43295|issue:44779"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6738, "estimated_input_tokens": 3241, "item_count": 18, "node_count": 18, "serialized_chars": 12964, "soft_pair_count": 9}, "cached_at": "2026-04-14T16:22:29Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "77ad68ca7c9cfe15c6184155d1ab1921845e9b004b92f36aaa5621f7a9f016bf", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:39692", "issue:42994", "issue:43299", "issue:43317", "issue:43441", "issue:43450", "issue:43550", "issue:43650", "issue:43723", "issue:43854", "issue:43901", "issue:44016", "issue:44162", "issue:44279", "issue:44291", "issue:44683", "issue:44964", "issue:45290"], "result": {"analyst_result": {"best_issue_reason": "issue:44162 is the best overall issue candidate because it is clearly framed, impactful, and not just a narrow docs or test artifact.", "best_pr_reason": null, "canonical_issue_reason": "issue:44162 is the strongest standalone issue in the set: it describes a broad, high-impact regression with clear user impact, making it the best canonical representative among otherwise unrelated tickets.", "canonical_pr_reason": null, "confidence": 0.92, "soft_edge_verdicts": [{"accept": false, "left": "issue:43299", 
"reason": "Different models and failure modes: Qwen3VL MoE loading regression vs GLM-4.7-Flash test loading issue. No evidence of the same code path.", "right": "issue:43854"}, {"accept": false, "left": "issue:43441", "reason": "Both involve attention backends, but one is Ministral-3 FlashAttention behavior and the other is compiled flex_attention on torch>=2.9. Different concrete bugs.", "right": "issue:44683"}, {"accept": false, "left": "issue:39692", "reason": "Both mention documentation, but one is a SigLIP2 example with model/processor mismatch and quantization failure; the other is a pipeline docs mismatch about return_all_scores. Not the same bug.", "right": "issue:43901"}, {"accept": false, "left": "issue:43450", "reason": "Video processor shape handling and a vague dependency issue are unrelated.", "right": "issue:44279"}, {"accept": false, "left": "issue:43723", "reason": "Tokenizer loading in v5 and ESM2 model breakage are different subsystems and different failure behaviors.", "right": "issue:44162"}, {"accept": false, "left": "issue:43650", "reason": "Generic data request vs chat template crash with tool-call assistant messages. No underlying code-path overlap.", "right": "issue:45290"}, {"accept": false, "left": "issue:42994", "reason": "Quantized model saving failure is not the same as loading a dequantized model with device_map=auto and CPU/GPU offload.", "right": "issue:43317"}, {"accept": false, "left": "issue:43550", "reason": "torch.compile+SDPA failure in Bamba-9B-v2 is unrelated to loading Phi-4-multimodal-instruct with latest transformers.", "right": "issue:44964"}, {"accept": false, "left": "issue:44016", "reason": "Notebook syntax error and init_empty_weights TypeError are unrelated issues affecting different surfaces.", "right": "issue:44291"}], "summary": "All proposed soft pairs look like separate bugs or docs regressions rather than duplicates. 
The set spans unrelated model-loading, attention/backends, tokenizer, quantization, notebook, and chat-template issues."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43299|issue:43854", "issue:43441|issue:44683", "issue:39692|issue:43901", "issue:43450|issue:44279", "issue:43723|issue:44162", "issue:43650|issue:45290", "issue:42994|issue:43317", "issue:43550|issue:44964", "issue:44016|issue:44291"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6814, "estimated_input_tokens": 3279, "item_count": 18, "node_count": 18, "serialized_chars": 13113, "soft_pair_count": 9}, "cached_at": "2026-04-14T16:23:08Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "6fa71b0de633987ca87e9202a91d0bc194da38b5806e903d99646c2bfe9a8db2", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:43065", "issue:43122", "issue:43262", "issue:43352", "issue:43650", "issue:43673", "issue:43761", "issue:44261", "issue:44263", "issue:44466", "issue:44521", "issue:44671", "issue:44779", "issue:44898", "issue:45071", "issue:45250", "issue:45276", "issue:45372"], "result": {"analyst_result": {"best_issue_reason": "Issue 44261 is the best overall issue candidate because it is a concrete regression report with high discussion/reference activity and a clear target for fix work.", "best_pr_reason": null, "canonical_issue_reason": "Issue 44261 is the strongest representative bug report here: it is active, well-discussed, and has the broadest follow-on signal (many inbound references) while still describing a concrete, actionable defect.", "canonical_pr_reason": null, "confidence": 0.92, "soft_edge_verdicts": 
[{"accept": false, "left": "issue:44261", "reason": "Different bugs in different code paths: MLA q_a_layernorm config handling vs GlmMoeDsaIndexer split outputs.", "right": "issue:44263"}, {"accept": false, "left": "issue:43065", "reason": "Both concern vision models, but they are distinct problems in different models and behaviors (Sam3PixelDecoder dummy Conv2d vs Perceiver resize/interpolation failure).", "right": "issue:44898"}, {"accept": false, "left": "issue:43262", "reason": "Shared apply_chat_template context, but the bugs differ: audio sampling-rate defaulting vs multimodal assistant mask generation.", "right": "issue:44521"}, {"accept": false, "left": "issue:44466", "reason": "Different underlying issues: device-dependent lm_head serialization vs resize_token_embeddings not propagating to Gemma4 embedding/output weights.", "right": "issue:45276"}, {"accept": false, "left": "issue:44671", "reason": "Both are v5 regressions, but one is CamemBERT masked-LM prediction drift and the other is Deepseek tokenizer output mismatch; not the same bug.", "right": "issue:44779"}, {"accept": false, "left": "issue:43122", "reason": "The second item is not a substantive matching bug report; no evidence it refers to the tokenizer regression in 43122.", "right": "issue:43650"}, {"accept": false, "left": "issue:43673", "reason": "Unrelated regressions: missing GenerationMixin cache in chunked prefill vs CLIPVisionModel.hidden_states not returned.", "right": "issue:43761"}, {"accept": false, "left": "issue:45071", "reason": "Different classes of failure: PretrainedConfig type checking breakage vs Gemma 4 processor import failure from mistral_common.", "right": "issue:45372"}, {"accept": false, "left": "issue:43352", "reason": "Flash Attention 2.0 is a shared topic, but the concrete problem differs: a model-specific unsupported-FA2 error vs a generic FA2 issue.", "right": "issue:45250"}], "summary": "These are mostly unrelated issue reports spanning different models, 
tokenizers, config/type-checking, and chat-template regressions. None of the soft pairs look like true duplicates of the same underlying bug."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44261|issue:44263", "issue:43065|issue:44898", "issue:43262|issue:44521", "issue:44466|issue:45276", "issue:44671|issue:44779", "issue:43122|issue:43650", "issue:43673|issue:43761", "issue:45071|issue:45372", "issue:43352|issue:45250"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6604, "estimated_input_tokens": 3174, "item_count": 17, "node_count": 17, "serialized_chars": 12696, "soft_pair_count": 10}, "cached_at": "2026-04-14T16:23:35Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "55b0aa3d1cc5be70c024e8853cd7abc0cff7f9e5be2a833f129454fcce9d27d9", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:42175", "issue:43295", "issue:43475", "issue:43525", "issue:43824", "issue:43881", "issue:43901", "issue:43976", "issue:43994", "issue:44016", "issue:44246", "issue:44589", "issue:44617", "issue:44623", "issue:44749", "issue:45362", "issue:45397"], "result": {"analyst_result": {"best_issue_reason": "No good global representative exists because the issues are unrelated across different subsystems and failure modes.", "best_pr_reason": null, "canonical_issue_reason": "No single canonical issue fits: the cluster is heterogeneous and the soft pairs do not describe the same defect.", "canonical_pr_reason": null, "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "issue:42175", "reason": "Different problems: missing TensorFlow in a torch extra vs Python version compatibility.", 
"right": "issue:43976"}, {"accept": false, "left": "issue:44623", "reason": "Unrelated: processor.save_pretrained files missing vs a slowdown regression in filtering data.", "right": "issue:44749"}, {"accept": false, "left": "issue:43901", "reason": "Docs/API mismatch for return_all_scores is not the same as incorrect SigLIP2 model outputs.", "right": "issue:43994"}, {"accept": false, "left": "issue:44623", "reason": "Different loading/saving issues: missing processor files vs Gemma-4 ZeRO-3 from_pretrained failure.", "right": "issue:45397"}, {"accept": false, "left": "issue:43295", "reason": "Custom processor/tokenizer regression is unrelated to a CUDA OOM in Sam3Video.", "right": "issue:44617"}, {"accept": false, "left": "issue:43824", "reason": "Import error for a specific model class is not the same as a Python 3.9/3.10 support issue.", "right": "issue:43976"}, {"accept": false, "left": "issue:43475", "reason": "Sam3 video attribute error and a notebook syntax error are clearly different bugs.", "right": "issue:44016"}, {"accept": false, "left": "issue:44246", "reason": "Slow import and CUDA OOM are separate performance/runtime failures with different code paths.", "right": "issue:44617"}, {"accept": false, "left": "issue:43525", "reason": "Two model-specific load/config errors, but for different models and attributes; not the same bug.", "right": "issue:43881"}, {"accept": false, "left": "issue:44589", "reason": "Float8 storage lookup failure is unrelated to Qwen3.5 chat crashes.", "right": "issue:45362"}], "summary": "This cluster is not a true duplicate set: the issues span unrelated packaging, model-loading, docs, performance, and notebook errors. 
None of the soft pairs look like the same underlying bug or change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:42175|issue:43976", "issue:44623|issue:44749", "issue:43901|issue:43994", "issue:44623|issue:45397", "issue:43295|issue:44617", "issue:43824|issue:43976", "issue:43475|issue:44016", "issue:44246|issue:44617", "issue:43525|issue:43881", "issue:44589|issue:45362"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6942, "estimated_input_tokens": 3343, "item_count": 18, "node_count": 18, "serialized_chars": 13372, "soft_pair_count": 10}, "cached_at": "2026-04-14T16:24:28Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "01b2036874de78e416e98f8be57f2352cd10b4dbcab0afcafe28ffbf82199cba", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:41628", "issue:41950", "issue:42491", "issue:43299", "issue:43901", "issue:43976", "issue:44188", "issue:44220", "issue:44246", "issue:44297", "issue:44336", "issue:44361", "issue:44373", "issue:44462", "issue:44496", "issue:44749", "issue:45356", "issue:45397"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.91, "soft_edge_verdicts": [{"accept": false, "left": "issue:44188", "reason": "Different failures: torch.compile attention-kernel divergence vs ANSI codes emitted by loading_report.", "right": "issue:44336"}, {"accept": false, "left": "issue:43901", "reason": "One is a docs/behavior mismatch for return_all_scores; the other is intermittent slow import. 
No shared code bug.", "right": "issue:44246"}, {"accept": false, "left": "issue:41628", "reason": "Missing AutoImageProcessor import and Python 3.9/3.10 compatibility regression are unrelated symptoms and fixes.", "right": "issue:43976"}, {"accept": false, "left": "issue:44462", "reason": "AutoTokenizer ignoring tokenizer.json is a different loading-path bug from Kimi-K2.5 codec handling and warning text.", "right": "issue:45356"}, {"accept": false, "left": "issue:41950", "reason": "Video-classification image-processor lookup and _torch_extract_fbank_features are separate pipeline/audio code paths.", "right": "issue:44220"}, {"accept": false, "left": "issue:44297", "reason": "tokenizer.save_pretrained metadata mismatch is unrelated to gemma-4 zero3 from_pretrained loading issues.", "right": "issue:45397"}, {"accept": false, "left": "issue:44336", "reason": "ANSI codes in loading_report and a wrong docstring for position_ids are different kinds of issues.", "right": "issue:44373"}, {"accept": false, "left": "issue:44361", "reason": "MLukeTokenizer AttributeError and a general slowdown after upgrading are not the same bug.", "right": "issue:44749"}, {"accept": false, "left": "issue:42491", "reason": "Both involve Qwen3/MoE loading regressions, but one is a LoRA compatibility break and the other is a Qwen3VL MoE load failure; not clearly the same concrete issue.", "right": "issue:43299"}, {"accept": false, "left": "issue:44246", "reason": "Slow import intermittently and unrecognized model/config.json are distinct problems in different startup paths.", "right": "issue:44496"}], "summary": "This cluster is mostly heterogeneous Transformers issues spanning tokenizer regressions, model-loading breakages, pipeline bugs, docs, and import/performance problems. 
None of the soft pairs are strong enough to treat as the same underlying bug."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44188|issue:44336", "issue:43901|issue:44246", "issue:41628|issue:43976", "issue:44462|issue:45356", "issue:41950|issue:44220", "issue:44297|issue:45397", "issue:44336|issue:44373", "issue:44361|issue:44749", "issue:42491|issue:43299", "issue:44246|issue:44496"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6526, "estimated_input_tokens": 3135, "item_count": 17, "node_count": 17, "serialized_chars": 12540, "soft_pair_count": 9}, "cached_at": "2026-04-14T16:25:22Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "6bc15bfee1b90f9048a18025ff5ab66fa36d7030c942918dd9b1913ff75a6689", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:36246", "issue:43526", "issue:43531", "issue:43673", "issue:43994", "issue:44242", "issue:44291", "issue:44295", "issue:44336", "issue:44351", "issue:44485", "issue:44561", "issue:44857", "issue:45003", "issue:45276", "issue:45362", "issue:45397"], "result": {"analyst_result": {"best_issue_reason": "issue:45003 is the best representative issue because it targets shared infrastructure rather than a model-specific edge case, so it is the most reusable anchor for triage/search.", "best_pr_reason": null, "canonical_issue_reason": "issue:45003 is the broadest, most central core-runtime report in the set (modeling_utils / sys.modules access) and is open, but the cluster overall does not look like one duplicate family.", "canonical_pr_reason": null, "confidence": 0.94, "soft_edge_verdicts": [{"accept": false, "left": "issue:36246", 
"reason": "Both are import/export failures, but they involve different missing symbols in different modules. No evidence they stem from the same code-path bug.", "right": "issue:44351"}, {"accept": false, "left": "issue:43531", "reason": "Both mention Qwen models, but one is a sliding_window bug and the other is a chat/runtime crash. Too different to be the same fix.", "right": "issue:45362"}, {"accept": false, "left": "issue:44295", "reason": "These are separate model-specific failures with different triggers: buffer access vs Zero3/from_pretrained behavior.", "right": "issue:45397"}, {"accept": false, "left": "issue:43526", "reason": "BeitImageProcessorFast reduce_labels behavior is unrelated to sys.modules access in modeling_utils.", "right": "issue:45003"}, {"accept": false, "left": "issue:44336", "reason": "ANSI-code output handling and GLM-5 RoPE implementation are unrelated bugs in different areas.", "right": "issue:44485"}, {"accept": false, "left": "issue:43994", "reason": "Siglip2 AutoModel/pipeline output corruption and gemma4 resize_token_embeddings are distinct model behaviors with no shared concrete bug.", "right": "issue:45276"}, {"accept": false, "left": "issue:43673", "reason": "Chunked_prefill cache handling and the removal of is_torch_fx_available affect different subsystems and failure modes.", "right": "issue:44561"}, {"accept": false, "left": "issue:44291", "reason": "Unexpected _is_hf_initialized in init_empty_weights and a Qwen chat crash are not the same underlying issue.", "right": "issue:45362"}, {"accept": false, "left": "issue:44242", "reason": "MoE load-balancing loss gating and an AMP/CUDA loss crash are different problems in different code paths.", "right": "issue:44857"}], "summary": "This cluster is mostly a set of unrelated closed/open bug reports across different models and utilities. 
The soft pairs share broad thematic similarity at best, but not the same underlying bug or fix path."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:36246|issue:44351", "issue:43531|issue:45362", "issue:44295|issue:45397", "issue:43526|issue:45003", "issue:44336|issue:44485", "issue:43994|issue:45276", "issue:43673|issue:44561", "issue:44291|issue:45362", "issue:44242|issue:44857"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6934, "estimated_input_tokens": 3339, "item_count": 18, "node_count": 18, "serialized_chars": 13354, "soft_pair_count": 10}, "cached_at": "2026-04-14T16:26:11Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "9e93766f343085f5f6215376727cd9f45251d5a8120ddc13b5b26b4f98857cc8", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:36246", "issue:41628", "issue:41950", "issue:42886", "issue:43066", "issue:43441", "issue:43540", "issue:43596", "issue:44162", "issue:44246", "issue:44779", "issue:44871", "issue:44877", "issue:44995", "issue:45200", "issue:45229", "issue:45292", "issue:45375"], "result": {"analyst_result": {"best_issue_reason": "No global best issue is a good anchor here for the same reason; any single issue would misrepresent the cluster\u2019s mixed, non-duplicate contents.", "best_pr_reason": null, "canonical_issue_reason": "No single issue is a credible canonical for the cluster: the items span different model families and distinct failure modes (imports, tokenizer behavior, config strictness, performance, OOM, and runtime crashes).", "canonical_pr_reason": null, "confidence": 0.93, "soft_edge_verdicts": [{"accept": false, "left": 
"issue:43066", "reason": "Both are tokenizer regressions, but one is a wrong decoder-type issue and the other is a DeepSeek-specific incorrect-results bug; different code paths and symptoms.", "right": "issue:44779"}, {"accept": false, "left": "issue:44162", "reason": "ESM2 model breakage and Gemma 4 mm_token_type_ids requirements are unrelated model-specific bugs.", "right": "issue:45200"}, {"accept": false, "left": "issue:36246", "reason": "An import error for Qwen2.5 VLImageProcessor is unrelated to the Deepspeed Zero3/BertModel index-out-of-bounds crash.", "right": "issue:43596"}, {"accept": false, "left": "issue:44877", "reason": "Both involve config handling, but they target different models and different symptoms; not the same concrete bug.", "right": "issue:45375"}, {"accept": false, "left": "issue:42886", "reason": "Offline tokenizer cache loading and Gemma4 multi-GPU CUDA OOM are entirely different failures.", "right": "issue:45229"}, {"accept": false, "left": "issue:43441", "reason": "Ministral FlashAttention failure and Qwen3OmniMoe video-processing ValueError are unrelated model/runtime bugs.", "right": "issue:43540"}, {"accept": false, "left": "issue:41628", "reason": "AutoImageProcessor import failure and video-classification looking for image processors are different issues, not one fixable code-path bug.", "right": "issue:41950"}, {"accept": false, "left": "issue:44246", "reason": "Slow import timing and Gemma-3 EOS token configuration mismatch are unrelated.", "right": "issue:44871"}, {"accept": false, "left": "issue:44995", "reason": "A stale indexer cache crash in GlmMoeDsa is not the same as Gemma 4\u2019s missing mm_token_type_ids defaulting issue.", "right": "issue:45200"}, {"accept": false, "left": "issue:44779", "reason": "DeepSeek tokenizer correctness and resize_token_embeddings not updating output embeddings are different subsystems and bugs.", "right": "issue:45292"}], "summary": "This cluster is a loose similarity bucket of mostly 
unrelated Transformers issues; the soft pairs share broad keywords but not the same underlying bug or fix path."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43066|issue:44779", "issue:44162|issue:45200", "issue:36246|issue:43596", "issue:44877|issue:45375", "issue:42886|issue:45229", "issue:43441|issue:43540", "issue:41628|issue:41950", "issue:44246|issue:44871", "issue:44995|issue:45200", "issue:44779|issue:45292"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6558, "estimated_input_tokens": 3151, "item_count": 17, "node_count": 17, "serialized_chars": 12603, "soft_pair_count": 9}, "cached_at": "2026-04-14T16:27:09Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "5fc67317c8753374e790964fd86a6a4d96b70c841fea5f067d18a3a8896fb836", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:43065", "issue:43232", "issue:43257", "issue:43262", "issue:43295", "issue:43408", "issue:43475", "issue:43540", "issue:43901", "issue:44246", "issue:44295", "issue:44336", "issue:44496", "issue:44749", "issue:45003", "issue:45137", "issue:45292"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.98, "soft_edge_verdicts": [{"accept": false, "left": "issue:43232", "reason": "Generation kwargs after sync_gpus and ANSI escape handling in loading_report are unrelated code paths and symptoms.", "right": "issue:44336"}, {"accept": false, "left": "issue:43901", "reason": "A docs/behavior mismatch for TextClassificationPipeline is unrelated to unsafe sys.modules access in modeling_utils.", 
"right": "issue:45003"}, {"accept": false, "left": "issue:44295", "reason": "Position_ids buffer access error and Chinese-reported slowdown after upgrade are different failures with no clear shared bug.", "right": "issue:44749"}, {"accept": false, "left": "issue:43065", "reason": "Both are SAM3-related, but one is a dummy Conv2d in Sam3PixelDecoder while the other is a model-type warning for sam3_video vs sam3_tracker; not the same fix.", "right": "issue:43408"}, {"accept": false, "left": "issue:43262", "reason": "Audio chat-template sampling-rate defaulting and slow import behavior are unrelated.", "right": "issue:44246"}, {"accept": false, "left": "issue:44496", "reason": "Unrecognized model config for OLMo-Hybrid-Instruct-SFT-7B is a model registration/config issue, not the sys.modules access bug.", "right": "issue:45003"}, {"accept": false, "left": "issue:43257", "reason": "Both mention DeepSpeed, but Qwen3 MOE weight conversion and ZeRO3 deque underflow are different concrete failures.", "right": "issue:45137"}, {"accept": false, "left": "issue:43475", "reason": "SAM3 video encoder output missing an attribute and Qwen3OmniMoe video-input validation are different model-specific issues.", "right": "issue:43540"}, {"accept": false, "left": "issue:43295", "reason": "Processor.tokenizer access regression and resize_token_embeddings not updating output embeddings are distinct model API bugs.", "right": "issue:45292"}], "summary": "This cluster is heterogeneous: the items span unrelated subsystems (SAM3, Qwen, audio processors, generation, docs, loading, etc.). 
None of the soft pairs look like the same underlying bug or a single mergeable change set."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43232|issue:44336", "issue:43901|issue:45003", "issue:44295|issue:44749", "issue:43065|issue:43408", "issue:43262|issue:44246", "issue:44496|issue:45003", "issue:43257|issue:45137", "issue:43475|issue:43540", "issue:43295|issue:45292"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7234, "estimated_input_tokens": 3489, "item_count": 18, "node_count": 18, "serialized_chars": 13955, "soft_pair_count": 11}, "cached_at": "2026-04-14T16:27:57Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "0ba1c4ff2ce8313cb20177c3a323a700cb27f61ed35537a543f874b86131bcc2", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:43232", "issue:43441", "issue:43606", "issue:43653", "issue:43673", "issue:43931", "issue:44060", "issue:44077", "issue:44368", "issue:44442", "issue:44466", "issue:44734", "issue:44749", "issue:44869", "issue:44977", "issue:45278", "issue:45335", "issue:45356"], "result": {"analyst_result": {"best_issue_reason": "43673 is the least specific representative of the few generation-related items, though it only loosely overlaps with 43232 and 44734 and does not fit the other issues.", "best_pr_reason": null, "canonical_issue_reason": "43673 is the broadest generation/cache regression in the set, but the cluster is too mixed for a strong canonical duplicate anchor.", "canonical_pr_reason": null, "confidence": 0.94, "soft_edge_verdicts": [{"accept": false, "left": "issue:44060", "reason": "Both are warning/validation issues, but they concern 
different models and different configuration checks; not the same bug.", "right": "issue:44077"}, {"accept": false, "left": "issue:43673", "reason": "Both involve generation/cache code, but one is a missing cache in chunked prefill and the other is wrong indexing in /v1/responses; different failure modes and code paths.", "right": "issue:44734"}, {"accept": false, "left": "issue:44869", "reason": "Both mention decoding/codec-like symptoms, but they affect different models and different tokenizer/decoder paths; not a duplicate.", "right": "issue:45356"}, {"accept": false, "left": "issue:44368", "reason": "Same model family, but one is a tied-embedding warning during LoRA fine-tuning and the other is a FlashAttention generation failure; different issues.", "right": "issue:44977"}, {"accept": false, "left": "issue:44466", "reason": "Both touch embeddings/tied weights, but one is inconsistent serialization by device and the other is resize_token_embeddings not propagating; not the same concrete bug.", "right": "issue:45335"}, {"accept": false, "left": "issue:43653", "reason": "A tokenizer special-token bug and a training slowdown regression are unrelated.", "right": "issue:44749"}, {"accept": false, "left": "issue:44442", "reason": "AutoTokenizer load failure and a post-upgrade slowdown are different problem classes.", "right": "issue:44749"}, {"accept": false, "left": "issue:43232", "reason": "Both are generation-related, but sync_gpus kwargs handling and chunked_prefill cache handling are distinct code paths and symptoms.", "right": "issue:43673"}, {"accept": false, "left": "issue:43931", "reason": "Model weight-shape loading error is unrelated to a tie_word_embeddings warning during fine-tuning.", "right": "issue:44368"}, {"accept": false, "left": "issue:43441", "reason": "FlashAttention failure on one model and CPU offload device mismatch on another are different regressions.", "right": "issue:43606"}, {"accept": false, "left": "issue:43673", "reason": "A specific 
generation/cache bug is not the same as broad import errors after upgrade.", "right": "issue:45278"}], "summary": "This cluster is heterogeneous: most items are unrelated model-specific regressions, tokenizer issues, or tied-weights warnings. No soft pair looks like the same underlying bug, so they should not be merged as duplicates. If one issue must serve as an anchor, 43673 is the closest on the generation/cache side, but it still does not represent the rest well."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44060|issue:44077", "issue:43673|issue:44734", "issue:44869|issue:45356", "issue:44368|issue:44977", "issue:44466|issue:45335", "issue:43653|issue:44749", "issue:44442|issue:44749", "issue:43232|issue:43673", "issue:43931|issue:44368", "issue:43441|issue:43606", "issue:43673|issue:45278"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6954, "estimated_input_tokens": 3349, "item_count": 18, "node_count": 18, "serialized_chars": 13395, "soft_pair_count": 10}, "cached_at": "2026-04-14T16:29:06Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "168c5025b15b177323aa4da4dd38e3f6bab133636e4133675da4f441c2fde2bc", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:28282", "issue:30990", "issue:36246", "issue:42371", "issue:42673", "issue:42898", "issue:43381", "issue:43452", "issue:43646", "issue:43704", "issue:43824", "issue:44162", "issue:44297", "issue:44623", "issue:45003", "issue:45092", "issue:45292", "issue:45335"], "result": {"analyst_result": {"best_issue_reason": "Issue 42673 is the strongest issue representative in this set because it is an exact duplicate target 
with clear symptom wording and the most active discussion among the duplicate pair.", "best_pr_reason": null, "canonical_issue_reason": "Issue 42673 is the best canonical for the Qwen3 leak family: it is the earlier report, has the exact same title as 43704, and has much richer discussion activity.", "canonical_pr_reason": null, "confidence": 0.71, "soft_edge_verdicts": [{"accept": false, "left": "issue:43646", "reason": "Both mention Transformers breakage, but one is custom model initialization in v5 and the other is ESM2-specific breakage; different concrete failures and code paths.", "right": "issue:44162"}, {"accept": true, "left": "issue:42673", "reason": "Exact same title and same Qwen3ForCausalLM VRAM leak symptom; these are the same bug report.", "right": "issue:43704"}, {"accept": false, "left": "issue:44297", "reason": "Both involve save_pretrained, but tokenizer metadata mismatch and missing processor files are different serialization bugs.", "right": "issue:44623"}, {"accept": false, "left": "issue:28282", "reason": "ImportError for missing PyTorch and a loading hang in Sentence Transformers are unrelated symptoms.", "right": "issue:30990"}, {"accept": false, "left": "issue:43381", "reason": "Gradient checkpointing in eval mode and resize_token_embeddings not updating output embeddings are distinct APIs and failures.", "right": "issue:45292"}, {"accept": false, "left": "issue:42898", "reason": "Tokenizer cleanup behavior change in v5 is unrelated to the ESM2 breakage report.", "right": "issue:44162"}, {"accept": false, "left": "issue:43381", "reason": "One is about eval-mode checkpointing; the other is a t5gemma-specific resize_token_embeddings/output-embedding bug.", "right": "issue:45335"}, {"accept": true, "left": "issue:36246", "reason": "Both are Qwen2.5-VL import/export failures around missing class reexports in the package; they likely stem from the same module export regression.", "right": "issue:43824"}, {"accept": false, "left": "issue:43452", 
"reason": "gguf_file/from_pretrained breakage and unsafe sys.modules access in modeling_utils are different load-path issues.", "right": "issue:45003"}, {"accept": false, "left": "issue:42371", "reason": "TF32 API guidance and old InternVL2 meta-initialization incompatibility are unrelated problems.", "right": "issue:45092"}], "summary": "This cluster is mostly heterogeneous. The only clear duplicate issue family is the Qwen3 VRAM leak report (exact title match), and there is a plausible second duplicate family around Qwen2.5-VL import/export errors. The remaining pairs look like unrelated bugs sharing only broad subsystem overlap."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43646|issue:44162", "issue:42673|issue:43704", "issue:44297|issue:44623", "issue:28282|issue:30990", "issue:43381|issue:45292", "issue:42898|issue:44162", "issue:43381|issue:45335", "issue:36246|issue:43824", "issue:43452|issue:45003", "issue:42371|issue:45092"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6498, "estimated_input_tokens": 3121, "item_count": 17, "node_count": 17, "serialized_chars": 12484, "soft_pair_count": 9}, "cached_at": "2026-04-14T16:29:57Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "a966a0da5da8b0a4f61b4cc212fcb5c4715fc80ba4fa625857e4f32b5f4aa7b1", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:41628", "issue:42222", "issue:42994", "issue:43475", "issue:43525", "issue:43643", "issue:43873", "issue:44038", "issue:44623", "issue:44933", "issue:44945", "issue:45081", "issue:45103", "issue:45200", "issue:45310", "issue:45375", "issue:45381"], "result": {"analyst_result": 
{"best_issue_reason": "Issue 43643 is the most generic and upstream-facing of the set, centered on missing fields in config objects. Still, the cluster is too heterogeneous for it to serve as a real umbrella issue.", "best_pr_reason": null, "canonical_issue_reason": "No strong canonical duplicate exists; if one representative is needed, issue 43643 is the broadest config/field-preservation report, but it does not subsume the rest of the cluster.", "canonical_pr_reason": null, "confidence": 0.88, "soft_edge_verdicts": [{"accept": false, "left": "issue:41628", "reason": "Both are import-related, but they concern different missing symbols in different modules ('AutoImageProcessor' vs an image_utils import), so they are not the same bug.", "right": "issue:44933"}, {"accept": false, "left": "issue:43475", "reason": "Both are vision/video model bugs, but one is a missing attribute on Sam3VisionEncoderOutput and the other is an incorrect vision_position_ids behavior for qwen2.5-vl video input; different models and failure modes.", "right": "issue:45381"}, {"accept": false, "left": "issue:45081", "reason": "These crash in different subsystems: Mistral tokenizer regex patching vs auto_docstring processing with future annotations. 
Same symptom class, different code paths.", "right": "issue:45103"}, {"accept": false, "left": "issue:42222", "reason": "Both involve model configuration/processing, but vitpose breakage and Gemma 4 mm_token_type_ids defaulting are unrelated model-specific issues.", "right": "issue:45200"}, {"accept": false, "left": "issue:43475", "reason": "One is a SAM3 video encoder attribute access error, the other is generic config field loss with trust_remote_code; related only at a very high level of 'missing fields'.", "right": "issue:43643"}, {"accept": false, "left": "issue:42994", "reason": "Quantized model saving failure and Llama4Config missing pad_token_id are different bugs affecting different workflows and components.", "right": "issue:43525"}, {"accept": false, "left": "issue:45310", "reason": "Both concern Qwen3.5/Moe family configs, but one is a from_pretrained error on transformers>=5.4.0 while the other is a dropped deepstack_visual_indexes field; not clearly the same underlying defect.", "right": "issue:45375"}, {"accept": false, "left": "issue:44038", "reason": "Quantization/offloading behavior and processor.save_pretrained missing files are separate issues with different APIs and symptoms.", "right": "issue:44623"}, {"accept": false, "left": "issue:43873", "reason": "Offloading with quantization and incorrect LLM output under pipeline parallelism are distinct runtime behaviors and code paths.", "right": "issue:44945"}], "summary": "This cluster is mostly a loose semantic grouping of distinct Transformers bugs rather than a true duplicate set. 
Several items mention missing fields/attributes, but they affect different models, APIs, and code paths."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:41628|issue:44933", "issue:43475|issue:45381", "issue:45081|issue:45103", "issue:42222|issue:45200", "issue:43475|issue:43643", "issue:42994|issue:43525", "issue:45310|issue:45375", "issue:44038|issue:44623", "issue:43873|issue:44945"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7096, "estimated_input_tokens": 3420, "item_count": 18, "node_count": 18, "serialized_chars": 13677, "soft_pair_count": 12}, "cached_at": "2026-04-14T16:30:35Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "8082d64d063698cf9089f7fc0b4cc8e89cfd3783cdcc1bafdcd4edb68ac3d6f3", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:36010", "issue:41628", "issue:42831", "issue:42994", "issue:43232", "issue:43475", "issue:43525", "issue:43673", "issue:44164", "issue:44188", "issue:44222", "issue:44351", "issue:44661", "issue:45230", "issue:45310", "issue:45362", "issue:45375", "issue:45397"], "result": {"analyst_result": {"best_issue_reason": "issue:45397 is the most concrete and actionable report here: it names a specific model, version, and failure mode, while most other issues are either broader, vaguer, or clearly unrelated.", "best_pr_reason": null, "canonical_issue_reason": "No single canonical issue stands out because the cluster is heterogeneous and the soft-similarity links are mostly superficial rather than true duplicates.", "canonical_pr_reason": null, "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "issue:45230", "reason": 
"`Bug report` is too vague; 45397 is a specific zero3/from_pretrained failure for Gemma-4, so these are not the same bug.", "right": "issue:45397"}, {"accept": false, "left": "issue:43475", "reason": "SAM 3 Video attribute error and add-new-model-like tokenizer-mapping failure are different code paths and different failures.", "right": "issue:44661"}, {"accept": false, "left": "issue:45362", "reason": "Both mention Qwen3.5, but one is a chat crash and the other is a missing config field; distinct underlying bugs.", "right": "issue:45375"}, {"accept": false, "left": "issue:43232", "reason": "Generation kwargs synchronization after `sync_gpus` and torch.compile attention-kernel divergence are unrelated generation bugs.", "right": "issue:44188"}, {"accept": false, "left": "issue:41628", "reason": "Import errors for `AutoImageProcessor` and `HybridCache` are separate missing-symbol issues, not one duplicate.", "right": "issue:44351"}, {"accept": false, "left": "issue:44222", "reason": "FP8 MoE save_pretrained failure and Qwen3.5 MoE from_pretrained error may be adjacent, but they affect different operations and are not clearly the same bug.", "right": "issue:45310"}, {"accept": false, "left": "issue:42831", "reason": "An accuracy regression in FineGrainedFP8 is not the same as an FP8 MoE save_pretrained failure.", "right": "issue:44222"}, {"accept": false, "left": "issue:42994", "reason": "Both involve saving/loading, but one is quantized-model saving and the other is extra_state handling; different failure mechanisms.", "right": "issue:44164"}, {"accept": false, "left": "issue:36010", "reason": "Different import symbols fail (`GenerationMixin` vs `AutoImageProcessor`), so these are not duplicates.", "right": "issue:41628"}, {"accept": false, "left": "issue:43525", "reason": "Missing `pad_token_id` on Llama4Config and missing `deepstack_visual_indexes` on Qwen3_5MoeVisionConfig are separate config-schema bugs.", "right": "issue:45375"}, {"accept": false, "left": 
"issue:43673", "reason": "Chunked-prefill generation cache issue and torch.compile attention branching issue are different generation-related problems.", "right": "issue:44188"}, {"accept": false, "left": "issue:45310", "reason": "Both involve Qwen/MoE from_pretrained behavior, but the model/version and failure details differ enough that this is not clearly one underlying bug.", "right": "issue:45397"}], "summary": "This cluster is not a duplicate set; the issues only share broad keywords like imports, generation, FP8, or from_pretrained/save_pretrained. The soft pairs look like different bugs in related areas, not the same underlying fix."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:45230|issue:45397", "issue:43475|issue:44661", "issue:45362|issue:45375", "issue:43232|issue:44188", "issue:41628|issue:44351", "issue:44222|issue:45310", "issue:42831|issue:44222", "issue:42994|issue:44164", "issue:36010|issue:41628", "issue:43525|issue:45375", "issue:43673|issue:44188", "issue:45310|issue:45397"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6964, "estimated_input_tokens": 3354, "item_count": 18, "node_count": 18, "serialized_chars": 13416, "soft_pair_count": 10}, "cached_at": "2026-04-14T16:31:22Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "0b80ae8bbe3cd77b0fa2a40e1405c9e020fb584ec939d87f3d9685422b71e3a6", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:30990", "issue:35141", "issue:42175", "issue:42898", "issue:43232", "issue:43377", "issue:43479", "issue:43784", "issue:43824", "issue:43976", "issue:44038", "issue:44079", "issue:44242", "issue:44568", 
"issue:44792", "issue:44869", "issue:44933", "issue:45310"], "result": {"analyst_result": {"best_issue_reason": "Issue 45310 is the best representative of the near-duplicate Qwen3/Qwen3.5 MoE loading bug; the rest of the items are unrelated to that code path.", "best_pr_reason": null, "canonical_issue_reason": "Issue 45310 is the clearest anchor for the only plausible duplicate pair: it is the most specific, recent report of the Qwen3/Qwen3.5 MoE from_pretrained regression.", "canonical_pr_reason": null, "confidence": 0.89, "soft_edge_verdicts": [{"accept": false, "left": "issue:30990", "reason": "Unrelated problems: sentence-transformers loading hang vs. token-embedding resizing/post_init reinitialization.", "right": "issue:35141"}, {"accept": false, "left": "issue:43232", "reason": "Different code paths: generation kwargs sync bug vs. janus image-generation test failure.", "right": "issue:44792"}, {"accept": false, "left": "issue:43479", "reason": "Config default initialization bug vs. ModelOutput key assignment bug; not the same underlying issue.", "right": "issue:44079"}, {"accept": false, "left": "issue:43377", "reason": "MIMI batching/padding-mask inconsistency vs. MoE load-balancing loss gating; different bugs.", "right": "issue:44242"}, {"accept": true, "left": "issue:44038", "reason": "Both describe Qwen3/Qwen3.5 MoE from_pretrained failures in transformers, plausibly the same loading regression.", "right": "issue:45310"}, {"accept": false, "left": "issue:43479", "reason": "Phi4Multimodal config defaults and Whisper timestamp decoding are unrelated.", "right": "issue:44869"}, {"accept": false, "left": "issue:42175", "reason": "Packaging/backend dependency issue vs. 
missing image_utils import; no shared bug.", "right": "issue:44933"}, {"accept": false, "left": "issue:42898", "reason": "Both are tokenizer behavior regressions in v5, but they concern different features and code paths.", "right": "issue:44568"}, {"accept": false, "left": "issue:43976", "reason": "Python version compatibility problem vs. image_utils import issue; unrelated.", "right": "issue:44933"}, {"accept": false, "left": "issue:43784", "reason": "Both are import errors, but they affect different symbols and failure modes, so not the same bug.", "right": "issue:43824"}], "summary": "This cluster is mostly heterogeneous: tokenizer regressions, import errors, multimodal config bugs, generation issues, and packaging/backend complaints are all mixed together. The only credible duplicate-like pair is the Qwen3/Qwen3.5 MoE from_pretrained failure reports."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:30990|issue:35141", "issue:43232|issue:44792", "issue:43479|issue:44079", "issue:43377|issue:44242", "issue:44038|issue:45310", "issue:43479|issue:44869", "issue:42175|issue:44933", "issue:42898|issue:44568", "issue:43976|issue:44933", "issue:43784|issue:43824"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7026, "estimated_input_tokens": 3385, "item_count": 18, "node_count": 18, "serialized_chars": 13539, "soft_pair_count": 10}, "cached_at": "2026-04-14T16:32:07Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "cedda4751b70fbc2ae006d31d1bf4791a490f190ae2a7fa0ac5c07bfce06049f", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:42898", "issue:43278", "issue:43723", 
"issue:43874", "issue:44060", "issue:44188", "issue:44373", "issue:44568", "issue:44623", "issue:44734", "issue:44779", "issue:44964", "issue:45092", "issue:45103", "issue:45125", "issue:45137", "issue:45310", "issue:45356"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "issue:44188", "reason": "Different problems: attention-kernel divergence under torch.compile vs a docstring typo for position_ids.", "right": "issue:44373"}, {"accept": false, "left": "issue:44060", "reason": "Both involve Qwen models, but one is an incorrect tied-weights warning and the other is a missing _tp_plan for tensor parallelism; different code paths and fixes.", "right": "issue:45125"}, {"accept": false, "left": "issue:44964", "reason": "Unrelated failures: loading microsoft/Phi-4-multimodal-instruct vs DeepSpeed ZeRO3 deque underflow.", "right": "issue:45137"}, {"accept": false, "left": "issue:44734", "reason": "Serving KV-cache continuation tensor indexing bug is unrelated to auto_docstring crashing on future annotations.", "right": "issue:45103"}, {"accept": false, "left": "issue:43723", "reason": "Both are tokenizer regressions in v5, but one is AutoTokenizer loading and the other is add_special_tokens not adding BOS/EOS for a specific tokenizer.", "right": "issue:44568"}, {"accept": false, "left": "issue:42898", "reason": "Different tokenizer regressions: clean_up_tokenization_spaces behavior change vs Kimi-K2.5 codec handling and warning behavior.", "right": "issue:45356"}, {"accept": false, "left": "issue:44623", "reason": "Missing processor save files is a different issue from Qwen3.5 MoE from_pretrained failing.", "right": "issue:45310"}, {"accept": false, "left": "issue:43278", "reason": "Embedding dtype mismatch between train/eval is unrelated to the DeepSpeed ZeRO3 empty-deque crash.", "right": "issue:45137"}, 
{"accept": false, "left": "issue:43874", "reason": "Missing get_number_of_image_patches and remote-code meta-init incompatibility are both multimodal, but they are distinct bugs with different failure points.", "right": "issue:45092"}, {"accept": false, "left": "issue:42898", "reason": "Different tokenizer regressions: generic clean_up_tokenization_spaces behavior change vs DeepSeek tokenizer producing incorrect results.", "right": "issue:44779"}], "summary": "The items are a heterogeneous mix of unrelated bugs and regressions across tokenization, multimodal loading, serving, distributed training, and docstring tooling. None of the soft pairs look like the same underlying bug or change, so no duplicates are accepted and there is no clear canonical issue/PR."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44188|issue:44373", "issue:44060|issue:45125", "issue:44964|issue:45137", "issue:44734|issue:45103", "issue:43723|issue:44568", "issue:42898|issue:45356", "issue:44623|issue:45310", "issue:43278|issue:45137", "issue:43874|issue:45092", "issue:42898|issue:44779"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6960, "estimated_input_tokens": 3352, "item_count": 18, "node_count": 18, "serialized_chars": 13407, "soft_pair_count": 10}, "cached_at": "2026-04-14T16:32:44Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "a8acdd77a314892276393833a16bf73568e1f6df53a16380c9210935a2ae120a", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:42898", "issue:43295", "issue:43479", "issue:43644", "issue:44077", "issue:44206", "issue:44222", "issue:44242", "issue:44263", "issue:44297", 
"issue:44360", "issue:44448", "issue:44483", "issue:44484", "issue:45103", "issue:45137", "issue:45276", "issue:45406"], "result": {"analyst_result": {"best_issue_reason": "issue:44242 is the strongest representative by discussion volume and inbound references, with a concrete, well-scoped bug report. That said, it is not a true canonical for the whole cluster because the cluster is heterogeneous.", "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.92, "soft_edge_verdicts": [{"accept": false, "left": "issue:44077", "reason": "Both involve model internals, but one is about patchtsmixer post_init handling and the other about missing load-balancing loss when router logits are off; different code paths and fixes.", "right": "issue:44242"}, {"accept": false, "left": "issue:42898", "reason": "Both mention v4/v5 output/behavior differences, but one is tokenization cleanup and the other is Pegasus generation parity; not the same bug.", "right": "issue:44448"}, {"accept": false, "left": "issue:45137", "reason": "DeepSpeed ZeRO3 deque underflow and gemma4 embedding-resize behavior are unrelated issues in different subsystems.", "right": "issue:45276"}, {"accept": false, "left": "issue:42898", "reason": "Tokenization-space cleanup behavior and processor.tokenizer/image-passing regression are separate regressions with different symptoms and fixes.", "right": "issue:43295"}, {"accept": false, "left": "issue:43479", "reason": "Phi4MultimodalConfig default-init behavior and non-persistent buffer junk are unrelated configuration vs state-init bugs.", "right": "issue:43644"}, {"accept": false, "left": "issue:44206", "reason": "Feature extractor center-arg regression and auto-docstring crash with future annotations are distinct failures in different code paths.", "right": "issue:45103"}, {"accept": false, "left": "issue:44222", "reason": "Both touch save_pretrained, but one is FP8 MoE saving and the other is tokenizer_class metadata 
mismatch; not the same underlying defect.", "right": "issue:44297"}, {"accept": false, "left": "issue:44222", "reason": "FP8 save_pretrained behavior and max_shard_size default discussion are different save_pretrained concerns, not one bug.", "right": "issue:44484"}, {"accept": false, "left": "issue:44483", "reason": "Chat/completions request rejection and Gemma4Processor missing _tokenizer are different serving/processor problems.", "right": "issue:45406"}, {"accept": false, "left": "issue:44263", "reason": "Both reference the DSA indexer, but one is about torch.split return values and the other about a missing ReLU; too specific and different to merge.", "right": "issue:44360"}], "summary": "This cluster is mostly a loose similarity group of unrelated Transformers v5 regression/behavior issues. The soft pairs share broad vocabulary (save_pretrained, tokenizer, processor, DSA, etc.) but do not look like the same underlying bug or change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44077|issue:44242", "issue:42898|issue:44448", "issue:45137|issue:45276", "issue:42898|issue:43295", "issue:43479|issue:43644", "issue:44206|issue:45103", "issue:44222|issue:44297", "issue:44222|issue:44484", "issue:44483|issue:45406", "issue:44263|issue:44360"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7110, "estimated_input_tokens": 3427, "item_count": 18, "node_count": 18, "serialized_chars": 13708, "soft_pair_count": 11}, "cached_at": "2026-04-14T16:33:20Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "e2c326465cc5f7ca3c1a62c2a032bf9f4950f6fba9fa6b04f36344ddc90b04a8", 
"prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:22355", "issue:28282", "issue:42831", "issue:43479", "issue:43606", "issue:43723", "issue:44222", "issue:44261", "issue:44263", "issue:44297", "issue:44514", "issue:44623", "issue:44871", "issue:44913", "issue:44977", "issue:45092", "issue:45103", "issue:45310"], "result": {"analyst_result": {"best_issue_reason": "Issue 44514 is the best single issue candidate overall: it describes a specific crash path (`Qwen2_5_VLProcessor.apply_chat_template` on batched input with `padding=False`) and appears to have broad interest from inbound references.", "best_pr_reason": null, "canonical_issue_reason": "Issue 44514 is the strongest representative to anchor the cluster because it is concrete, reproducible, and has the highest downstream discussion/reference activity among the listed issues.", "canonical_pr_reason": null, "confidence": 0.93, "soft_edge_verdicts": [{"accept": false, "left": "issue:42831", "reason": "Different problems: one is an FP8 accuracy regression, the other is `processor.save_pretrained()` missing files. 
Same broad area, but not the same bug.", "right": "issue:44623"}, {"accept": false, "left": "issue:44222", "reason": "Both mention save/pretrained-related flows, but one is an FP8 MoE save bug and the other is missing processor artifacts; different failure modes and code paths.", "right": "issue:44623"}, {"accept": false, "left": "issue:43723", "reason": "`AutoTokenizer.from_pretrained` loading in v5 and Qwen3.5 flash-attention generation failures are unrelated.", "right": "issue:44977"}, {"accept": false, "left": "issue:22355", "reason": "One is a missing `transformers.onnx` module import, the other is a missing PyTorch dependency error for `AutoModel`; distinct import/dependency issues.", "right": "issue:28282"}, {"accept": false, "left": "issue:44263", "reason": "`torch.split()` return handling in a GLM MoE indexer is unrelated to InternVL2 remote-code/meta-init incompatibility.", "right": "issue:45092"}, {"accept": false, "left": "issue:44514", "reason": "A batched processor chat-template crash is not the same as an `auto_docstring` AttributeError from `from __future__ import annotations`.", "right": "issue:45103"}, {"accept": false, "left": "issue:44297", "reason": "Tokenizer save/load metadata mismatch and Qwen3.5 MoE `from_pretrained` errors are different bugs, even if both touch tokenizer/model loading.", "right": "issue:45310"}, {"accept": false, "left": "issue:43606", "reason": "CPU offload device mismatch for bark-small is unrelated to missing saved processor files.", "right": "issue:44623"}, {"accept": false, "left": "issue:43479", "reason": "Both involve config defaults on reload/init, but they affect different model configs and different fields (`vision/audio` vs `rotary_pct`).", "right": "issue:44913"}, {"accept": false, "left": "issue:43723", "reason": "Tokenizer loading in v5 is not the same as Gemma-3 `eos_token_id` list-vs-single-value config inconsistency.", "right": "issue:44871"}, {"accept": false, "left": "issue:44261", "reason": "An MLA RMS 
norm epsilon precision issue is unrelated to InternVL2 meta-initialization compatibility.", "right": "issue:45092"}], "summary": "These items are a mixed bag of unrelated Hugging Face Transformers issues; none of the soft-edge pairs look like the same underlying bug, so I would not merge any of them into a duplicate set."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:42831|issue:44623", "issue:44222|issue:44623", "issue:43723|issue:44977", "issue:22355|issue:28282", "issue:44263|issue:45092", "issue:44514|issue:45103", "issue:44297|issue:45310", "issue:43606|issue:44623", "issue:43479|issue:44913", "issue:43723|issue:44871", "issue:44261|issue:45092"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6410, "estimated_input_tokens": 3077, "item_count": 17, "node_count": 17, "serialized_chars": 12305, "soft_pair_count": 9}, "cached_at": "2026-04-14T16:33:57Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "65b69ce638f02e26fefcd5b5938771b823290f8afcfe51fcb529fe4ff959062e", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:30990", "issue:38175", "issue:41628", "issue:42831", "issue:42886", "issue:42994", "issue:43723", "issue:43824", "issue:44295", "issue:44373", "issue:44442", "issue:44485", "issue:44623", "issue:44991", "issue:45092", "issue:45310", "issue:45357"], "result": {"analyst_result": {"best_issue_reason": "Issue 43723 is the best global representative only in a narrow sense: it has the strongest generic 'tokenizer loading in v5' framing and some cross-related similarity to other tokenizer-loading reports. 
It is not a true canonical for the full cluster.", "best_pr_reason": null, "canonical_issue_reason": "No single issue cleanly represents the whole cluster. If one must be chosen, issue 43723 is the closest broad representative of the recurring tokenizer-loading regression theme in v5, but it only covers a subset of the items.", "canonical_pr_reason": null, "confidence": 0.86, "soft_edge_verdicts": [{"accept": false, "left": "issue:44373", "reason": "Both mention position_ids / RoPE, but one is a docstring complaint and the other is a GLM-5 implementation discussion; not the same bug or fix.", "right": "issue:44485"}, {"accept": false, "left": "issue:43723", "reason": "Both are tokenizer-loading failures in v5, but they concern different models and likely different root causes, so they are not the same underlying issue.", "right": "issue:44991"}, {"accept": false, "left": "issue:45092", "reason": "Both relate to multimodal model compatibility, but one is about old InternVL2 remote-code checkpoints and meta initialization, while the other is a Qwen3.5 save_pretrained regression; different code paths.", "right": "issue:45357"}, {"accept": false, "left": "issue:42886", "reason": "Offline tokenizer cache loading and quantized model saving are unrelated failure modes.", "right": "issue:42994"}, {"accept": false, "left": "issue:43723", "reason": "Both involve tokenizer loading, but FastSpeech2ConformerTokenizer and the v5 AutoTokenizer regression are distinct issues with no evidence of a shared bug.", "right": "issue:44442"}, {"accept": false, "left": "issue:30990", "reason": "Sentence Transformers loading hang and siglip2 zero probabilities are unrelated.", "right": "issue:38175"}, {"accept": false, "left": "issue:44295", "reason": "Registering position_ids as a buffer and missing processor.save_pretrained files are different problems in different code paths.", "right": "issue:44623"}, {"accept": false, "left": "issue:41628", "reason": "Both are import errors, but for 
different symbols and model classes; the failures do not indicate one shared missing export.", "right": "issue:43824"}, {"accept": false, "left": "issue:42831", "reason": "FineGrainedFP8 accuracy degradation and Qwen3.5 MoE from_pretrained failure are different bugs affecting different mechanisms.", "right": "issue:45310"}], "summary": "This cluster is heterogeneous: it mixes unrelated tokenizer/import/load issues, a save_pretrained bug, a quantization bug, and a RoPE/position_ids discussion. The soft pairs are only loosely text-similar and do not look like the same underlying defect or change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44373|issue:44485", "issue:43723|issue:44991", "issue:45092|issue:45357", "issue:42886|issue:42994", "issue:43723|issue:44442", "issue:30990|issue:38175", "issue:44295|issue:44623", "issue:41628|issue:43824", "issue:42831|issue:45310"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6816, "estimated_input_tokens": 3280, "item_count": 18, "node_count": 18, "serialized_chars": 13117, "soft_pair_count": 10}, "cached_at": "2026-04-14T16:34:32Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "574ec9a4b34e622be582f7f0c7e45d7ae751dab277f1253854e4324249b57f6a", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:30990", "issue:33357", "issue:42371", "issue:42831", "issue:42886", "issue:42907", "issue:43066", "issue:43122", "issue:43576", "issue:43784", "issue:44038", "issue:44117", "issue:44263", "issue:44373", "issue:44514", "issue:44521", "issue:44704", "issue:44945"], "result": {"analyst_result": {"best_issue_reason": "issue:44521 is the 
strongest standalone report here: it is open, recent, has meaningful discussion/inbound references, and describes a concrete reproducible bug in `apply_chat_template` for multimodal inputs.", "best_pr_reason": null, "canonical_issue_reason": "No single canonical issue fits the cluster because the reports cover distinct failure modes (loading, tokenizer behavior, chat templating, FP8/TF32, processor plumbing, etc.) with no shared concrete code path.", "canonical_pr_reason": null, "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "issue:43576", "reason": "Both mention v5-related breakage, but one is an env command bug and the other is an import-time `NameError`; different code paths and fixes.", "right": "issue:43784"}, {"accept": false, "left": "issue:44263", "reason": "One is a runtime tensor-splitting bug and the other is a docstring error; not the same underlying issue.", "right": "issue:44373"}, {"accept": false, "left": "issue:43122", "reason": "Tokenizer output changes and multimodal chat-template assistant mask behavior are different bugs affecting different APIs.", "right": "issue:44521"}, {"accept": false, "left": "issue:42831", "reason": "FP8 accuracy regression and a Qwen3-VL-Moe bug are too vague and target different model/runtime issues.", "right": "issue:44038"}, {"accept": false, "left": "issue:42886", "reason": "Offline cache loading failure and wrong tokenizer decoder type are unrelated tokenizer problems with distinct failure modes.", "right": "issue:43066"}, {"accept": false, "left": "issue:44117", "reason": "A mapping returning `None` and missing kwargs passed to `cached_file` are separate preprocessing/loader plumbing bugs.", "right": "issue:44704"}, {"accept": false, "left": "issue:30990", "reason": "Sentence-transformers loading hang and MacOS bus error on a CLIP model are unrelated runtime failures.", "right": "issue:33357"}, {"accept": false, "left": "issue:42371", "reason": "TF32 configuration guidance and a 
`torch.split()` behavior issue have no shared concrete bug path.", "right": "issue:44263"}, {"accept": false, "left": "issue:43122", "reason": "Different tokenizer output between versions and a batched `apply_chat_template` crash are separate regression reports.", "right": "issue:44514"}, {"accept": false, "left": "issue:42907", "reason": "Saving dequantized models and incorrect LLM output under pipeline parallelism are distinct model/export vs inference-parallelism problems.", "right": "issue:44945"}], "summary": "This is not a true duplicate cluster: the items span unrelated bugs/features across different subsystems and versions, and the soft links are mostly superficial keyword overlap."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43576|issue:43784", "issue:44263|issue:44373", "issue:43122|issue:44521", "issue:42831|issue:44038", "issue:42886|issue:43066", "issue:44117|issue:44704", "issue:30990|issue:33357", "issue:42371|issue:44263", "issue:43122|issue:44514", "issue:42907|issue:44945"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6478, "estimated_input_tokens": 3111, "item_count": 17, "node_count": 17, "serialized_chars": 12442, "soft_pair_count": 9}, "cached_at": "2026-04-14T16:35:25Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "23d5870e5d71b5b02e1b0c8890334cf1d9f89943f721f6516c586bde1a75a050", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:39401", "issue:41628", "issue:42907", "issue:43116", "issue:43232", "issue:43937", "issue:43976", "issue:44488", "issue:44625", "issue:44704", "issue:44743", "issue:44829", "issue:44945", "issue:45200", "issue:45245", 
"issue:45278", "issue:45290"], "result": {"analyst_result": {"best_issue_reason": "Issue #44625 is the most representative single issue in the set because it has the strongest external interest and a concrete, user-facing model configuration regression; however, the overall cluster is still too mixed to treat as one bug.", "best_pr_reason": null, "canonical_issue_reason": "No true canonical issue: the cluster does not form a single duplicate problem. If forced to pick a representative, issue #44625 is the closest because it is an active, widely referenced configuration bug, but it is not a duplicate of the others.", "canonical_pr_reason": null, "confidence": 0.92, "soft_edge_verdicts": [{"accept": false, "left": "issue:44704", "reason": "Different bugs: cached_file kwargs forwarding in AutoProcessor vs apply_chat_template crashing on assistant tool-call messages.", "right": "issue:45290"}, {"accept": false, "left": "issue:43232", "reason": "Unrelated failures: generation kwargs after sync_gpus vs a category-cardinality RuntimeError.", "right": "issue:45245"}, {"accept": false, "left": "issue:43976", "reason": "Different areas and symptoms: Python 3.9/5.1.0 compatibility vs incorrect output under pipeline parallelism.", "right": "issue:44945"}, {"accept": false, "left": "issue:43232", "reason": "Not the same code-path: generation state handling after sync_gpus vs recurrent state reset in modular_qwen3_5.", "right": "issue:44743"}, {"accept": false, "left": "issue:39401", "reason": "Tokenizer offset_mapping bug is unrelated to a model-loading failure for cjvt/sleng-bert.", "right": "issue:44488"}, {"accept": false, "left": "issue:42907", "reason": "Different concrete issues: saving dequantized Ministral/Devstral models vs invalid GenerationConfig for GLM-5.", "right": "issue:43937"}, {"accept": false, "left": "issue:41628", "reason": "Both involve imports, but one is a missing AutoImageProcessor symbol and the other is broad import breakage after upgrading 
versions.", "right": "issue:45278"}, {"accept": false, "left": "issue:44625", "reason": "Different defect: num_labels propagation in Qwen3.5 config vs degenerate training with flash_attention_3.", "right": "issue:44829"}, {"accept": false, "left": "issue:43116", "reason": "Example-script multi-label classification returning empty results is unrelated to Gemma 4 mm_token_type_ids defaults.", "right": "issue:45200"}], "summary": "This cluster is heterogeneous: the items span unrelated bugs in tokenization, imports, generation, model config, and example scripts. The proposed soft pairs only share superficial wording or a broad subsystem, not the same concrete underlying defect."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44704|issue:45290", "issue:43232|issue:45245", "issue:43976|issue:44945", "issue:43232|issue:44743", "issue:39401|issue:44488", "issue:42907|issue:43937", "issue:41628|issue:45278", "issue:44625|issue:44829", "issue:43116|issue:45200"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6740, "estimated_input_tokens": 3242, "item_count": 18, "node_count": 18, "serialized_chars": 12966, "soft_pair_count": 9}, "cached_at": "2026-04-14T16:35:57Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "5609dd165c7e10c250239515fcfeab8e7fc39608ea5f9ab384404b7b8cc0b836", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:29127", "issue:30990", "issue:39401", "issue:41628", "issue:43122", "issue:43408", "issue:43475", "issue:43992", "issue:44261", "issue:44295", "issue:44297", "issue:44360", "issue:44373", "issue:44485", "issue:44521", "issue:44704", "issue:44779", 
"issue:44987"], "result": {"analyst_result": {"best_issue_reason": "No issue here is a strong cluster anchor because the topics do not converge on one concrete defect.", "best_pr_reason": null, "canonical_issue_reason": "No single canonical issue stands out; the items span distinct subsystems and regression types, so this looks like a false-positive cluster.", "canonical_pr_reason": null, "confidence": 0.94, "soft_edge_verdicts": [{"accept": false, "left": "issue:43408", "reason": "Both are SAM 3 Video-related, but one is a model-type warning and the other is a missing attribute crash; different concrete bugs.", "right": "issue:43475"}, {"accept": false, "left": "issue:44360", "reason": "Completely different areas: DSA indexer/ReLU versus GLM-5 RoPE implementation.", "right": "issue:44485"}, {"accept": false, "left": "issue:43122", "reason": "Both mention tokenizer regressions in v5, but one is a broad version-to-version tokenization change and the other is Deepseek-specific incorrect results; not the same bug.", "right": "issue:44779"}, {"accept": false, "left": "issue:43992", "reason": "Both concern loading paths, but they fail in different APIs and different code paths (UMT5 weights vs AutoProcessor kwargs).", "right": "issue:44704"}, {"accept": false, "left": "issue:29127", "reason": "Unrelated: LayoutLMv3 box-handling error messaging versus Sentence Transformers loading hang.", "right": "issue:30990"}, {"accept": false, "left": "issue:44261", "reason": "One is a numerical/config bug in MLA q_a_layernorm, the other is a docstring issue for position_ids.", "right": "issue:44373"}, {"accept": false, "left": "issue:44295", "reason": "Different problems: registering position_ids as a buffer versus tokenizer_class metadata saved in tokenizer_config.json.", "right": "issue:44297"}, {"accept": false, "left": "issue:41628", "reason": "Both involve loading/importing in newer transformers versions, but the failure modes and targets are different and not one concrete 
fix.", "right": "issue:44987"}, {"accept": false, "left": "issue:39401", "reason": "Tokenizer offset_mapping and multimodal assistant_masks are distinct bugs, even though both touch tokenization-related functionality.", "right": "issue:44521"}], "summary": "These are mostly unrelated issues grouped only by loose textual similarity. I do not see any soft pair that clearly describes the same underlying bug or change, so all candidate duplicate links should be rejected."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43408|issue:43475", "issue:44360|issue:44485", "issue:43122|issue:44779", "issue:43992|issue:44704", "issue:29127|issue:30990", "issue:44261|issue:44373", "issue:44295|issue:44297", "issue:41628|issue:44987", "issue:39401|issue:44521"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6854, "estimated_input_tokens": 3299, "item_count": 18, "node_count": 18, "serialized_chars": 13196, "soft_pair_count": 10}, "cached_at": "2026-04-14T16:36:30Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "9da257cf9ddc30e599f2381b68cb721e7f87636ac38c8c88f9e385e50dc477a3", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:36296", "issue:42371", "issue:42831", "issue:42907", "issue:43116", "issue:43122", "issue:43278", "issue:43452", "issue:43550", "issue:43824", "issue:43927", "issue:44038", "issue:44222", "issue:44295", "issue:44297", "issue:44351", "issue:44945", "issue:44977"], "result": {"analyst_result": {"best_issue_reason": "44297 is the strongest standalone issue in the set because it has a precise symptom, clear repro surface, and a focused code path, even though it is not 
meaningfully duplicated by the others.", "best_pr_reason": null, "canonical_issue_reason": "No true duplicate anchor stands out. If a representative issue is required, 44297 is the most concrete and self-contained bug report, centered on a specific tokenizer save_pretrained mismatch.", "canonical_pr_reason": null, "confidence": 0.91, "soft_edge_verdicts": [{"accept": false, "left": "issue:42371", "reason": "Both concern numerical behavior, but one is about TF32 configuration guidance and the other is an FP8 accuracy regression; different causes and different fixes.", "right": "issue:42831"}, {"accept": false, "left": "issue:43550", "reason": "Torch.compile/SDPA failure in Bamba-9B-v2 is a model/compiler path issue, while the position_ids bug is about buffer registration and attribute access; unrelated defects.", "right": "issue:44295"}, {"accept": false, "left": "issue:42907", "reason": "One is a dequantized model save problem, the other is a gguf_file/from_pretrained loading incompatibility; both involve model I/O but not the same bug.", "right": "issue:43452"}, {"accept": false, "left": "issue:36296", "reason": "Tensor parallel training failure and multi-label classification returning empty results are completely different features and code paths.", "right": "issue:43116"}, {"accept": false, "left": "issue:43927", "reason": "DiaConfig custom token IDs being lost on save/load is a config serialization bug, while position_ids buffer access is a different runtime error.", "right": "issue:44295"}, {"accept": false, "left": "issue:44038", "reason": "Qwen3-VL-Moe compatibility in transformers 5.0 and tokenizer_class mismatch on save_pretrained are separate issues with different failure modes.", "right": "issue:44297"}, {"accept": false, "left": "issue:43824", "reason": "Both are import errors, but they concern different missing symbols and likely different release/export problems.", "right": "issue:44351"}, {"accept": false, "left": "issue:44038", "reason": 
"Fine-grained FP8 save_pretrained for moe models and a Qwen3-VL-Moe transformers 5.0 issue are not the same underlying bug.", "right": "issue:44222"}, {"accept": false, "left": "issue:43122", "reason": "Tokenizer behavior changes across versions and flash-attention generation failure are unrelated problems.", "right": "issue:44977"}, {"accept": false, "left": "issue:43278", "reason": "Embedding dtype drift between train/eval and incorrect output under pipeline parallelism are distinct runtime behaviors with different root causes.", "right": "issue:44945"}], "summary": "The cluster is mostly a set of unrelated bug reports with only superficial overlap in topics like saving/loading, imports, or precision. None of the soft pairs look like the same underlying defect, so they should not be merged as duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:42371|issue:42831", "issue:43550|issue:44295", "issue:42907|issue:43452", "issue:36296|issue:43116", "issue:43927|issue:44295", "issue:44038|issue:44297", "issue:43824|issue:44351", "issue:44038|issue:44222", "issue:43122|issue:44977", "issue:43278|issue:44945"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6418, "estimated_input_tokens": 3081, "item_count": 17, "node_count": 17, "serialized_chars": 12323, "soft_pair_count": 9}, "cached_at": "2026-04-14T16:37:12Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "c9f4a22004b1acc08f4a6bac2a13d768fd783d43fb35995966d1bdac93c15a82", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:38617", "issue:42222", "issue:42673", "issue:42907", "issue:43064", "issue:43441", "issue:43673", 
"issue:43784", "issue:43937", "issue:44075", "issue:44222", "issue:44485", "issue:44912", "issue:44933", "issue:44936", "issue:45072", "issue:45278"], "result": {"analyst_result": {"best_issue_reason": "issue:45278 is the best single issue to represent the cluster for triage because it is open, broad, and closest to a cross-cutting regression bucket; the others are narrower, model-specific failures.", "best_pr_reason": null, "canonical_issue_reason": "issue:45278 is the broadest umbrella-style report in the set: it describes a wide import-regression surface after a version upgrade, which makes it the best triage anchor among these issues.", "canonical_pr_reason": null, "confidence": 0.91, "soft_edge_verdicts": [{"accept": false, "left": "issue:42222", "reason": "Different models and failure modes: vitpose import/breakage vs Ministral FlashAttention in v5 RC. No same code path.", "right": "issue:43441"}, {"accept": false, "left": "issue:42907", "reason": "Both involve quantization/saving, but one is about dequantized Ministral/Devstral save failures and the other about FP8 save_pretrained for MoE. Too different to treat as the same bug.", "right": "issue:44222"}, {"accept": false, "left": "issue:42907", "reason": "Both touch quantization, but one is a save-path bug and the other is MXFP4 load fallback for git-oss-20b. Different models and different code paths.", "right": "issue:44912"}, {"accept": false, "left": "issue:38617", "reason": "The newer issue is a broad import-regression umbrella, but the older one is a specific missing symbol import. Not enough evidence they are the same underlying bug.", "right": "issue:45278"}, {"accept": false, "left": "issue:43784", "reason": "Both are import errors, but they affect different symbols/modules and different downstream packages. 
The concrete breakages are not the same.", "right": "issue:44933"}, {"accept": false, "left": "issue:43064", "reason": "Training/FSDP optimizer-state corruption is unrelated to generation cache missing during chunked prefill.", "right": "issue:43673"}, {"accept": false, "left": "issue:42673", "reason": "VRAM leakage in multithreaded Qwen3 inference and bfloat16 dtype mismatches in SwitchTransformers/TimmWrapperModel are distinct inference bugs.", "right": "issue:45072"}, {"accept": false, "left": "issue:43937", "reason": "Same model family name, but one is invalid GenerationConfig handling and the other is RoPE implementation discussion; not the same concrete defect.", "right": "issue:44485"}, {"accept": false, "left": "issue:44075", "reason": "Optimizer SGD arguments being ignored is a different problem from trainer.evaluate() failing after trainer.train().", "right": "issue:44936"}], "summary": "The cluster is mostly a set of unrelated, model- or feature-specific bugs. None of the soft-edge pairs look safe to merge as duplicates; at best, a few share a very broad theme like import regressions or quantization, but the concrete failures differ."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:42222|issue:43441", "issue:42907|issue:44222", "issue:42907|issue:44912", "issue:38617|issue:45278", "issue:43784|issue:44933", "issue:43064|issue:43673", "issue:42673|issue:45072", "issue:43937|issue:44485", "issue:44075|issue:44936"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6898, "estimated_input_tokens": 3321, "item_count": 18, "node_count": 18, "serialized_chars": 13282, "soft_pair_count": 10}, "cached_at": "2026-04-14T16:37:55Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": 
"0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "bac9204d8e866a57b052bc8c8c261e321ffcf34ce27447fc820687c03ccdace5", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:22355", "issue:29127", "issue:33453", "issue:39401", "issue:42371", "issue:42673", "issue:43116", "issue:43493", "issue:43825", "issue:43874", "issue:43906", "issue:43931", "issue:43937", "issue:44484", "issue:44779", "issue:44945", "issue:44977", "issue:45310"], "result": {"analyst_result": {"best_issue_reason": "No global best issue is appropriate here because there is no coherent cluster; each item appears to describe a different problem in a different area or model family.", "best_pr_reason": null, "canonical_issue_reason": "No single canonical issue stands out: the set is heterogeneous, and the apparent matches are only superficial topic overlaps rather than one shared bug or code path.", "canonical_pr_reason": null, "confidence": 0.88, "soft_edge_verdicts": [{"accept": false, "left": "issue:42371", "reason": "Different bugs: TF32 behavior/deprecation guidance versus a missing GLM46V image-processor method causing an AttributeError.", "right": "issue:43874"}, {"accept": false, "left": "issue:43116", "reason": "One is a multi-label example script returning empty results; the other is a SigLIP2 implementation discrepancy. Different code paths and symptoms.", "right": "issue:43493"}, {"accept": false, "left": "issue:22355", "reason": "Unrelated: missing transformers.onnx import versus layoutlmv3 error-message clarity around insufficient boxes information.", "right": "issue:29127"}, {"accept": false, "left": "issue:43116", "reason": "Both are user-facing issues, but one concerns classification example output and the other a pipeline() v5 translation support warning. 
Not the same bug.", "right": "issue:43825"}, {"accept": false, "left": "issue:39401", "reason": "Both are tokenizer regressions, but they affect different model families and behaviors (offset_mapping vs incorrect tokenization results). No clear shared root cause.", "right": "issue:44779"}, {"accept": false, "left": "issue:33453", "reason": "Tokenizer-related, but one is a loading regression and the other is an offset_mapping bug. Too broad to be the same underlying defect.", "right": "issue:39401"}, {"accept": false, "left": "issue:43931", "reason": "Model weight shape mismatch for Qwen3-VL is unrelated to incorrect output under pipeline parallelism.", "right": "issue:44945"}, {"accept": false, "left": "issue:43937", "reason": "Both involve Qwen3.5 generation, but one is invalid GenerationConfig and the other is a flash-attention generation failure. Different failure modes.", "right": "issue:44977"}, {"accept": false, "left": "issue:44484", "reason": "save_pretrained max_shard_size behavior is unrelated to a Qwen3.5 MoE from_pretrained loading error.", "right": "issue:45310"}, {"accept": false, "left": "issue:42673", "reason": "VRAM leakage in multi-dataloader threads is not the same bug as an isolated reproduction of a different referenced issue.", "right": "issue:43906"}], "summary": "These items are mostly unrelated standalone bugs across tokenizers, model loading, pipeline behavior, TF32 config, and memory usage. 
The soft pairs share only broad topical similarity, not the same concrete underlying defect, so none should be merged."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:42371|issue:43874", "issue:43116|issue:43493", "issue:22355|issue:29127", "issue:43116|issue:43825", "issue:39401|issue:44779", "issue:33453|issue:39401", "issue:43931|issue:44945", "issue:43937|issue:44977", "issue:44484|issue:45310", "issue:42673|issue:43906"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7054, "estimated_input_tokens": 3399, "item_count": 18, "node_count": 18, "serialized_chars": 13593, "soft_pair_count": 10}, "cached_at": "2026-04-14T16:38:29Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "a1ffad89ef2c6279d7981dc9a283db76a21a795a2caf942143ff37795d6e11cb", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:38617", "issue:39401", "issue:41628", "issue:42673", "issue:43421", "issue:43450", "issue:43577", "issue:43582", "issue:43986", "issue:44117", "issue:44220", "issue:44291", "issue:44410", "issue:44829", "issue:45071", "issue:45081", "issue:45216", "issue:45310"], "result": {"analyst_result": {"best_issue_reason": "issue:45071 is the strongest standalone report here: it is a broad, clearly stated regression with a concrete affected area and multiple inbound references. 
Still, it is not a duplicate anchor for the rest of the set.", "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "issue:39401", "reason": "One is a Qwen3 tokenizer offset-mapping bug; the other is runtime special-token/post-processor desynchronization. Different symptom, different fix path.", "right": "issue:43421"}, {"accept": false, "left": "issue:43450", "reason": "Incorrect batched video tensor shape versus a crash when AutoProcessor loads a video model without torchvision. Related subsystem, but not the same defect.", "right": "issue:43986"}, {"accept": false, "left": "issue:38617", "reason": "Both are import errors, but for different missing symbols and modules. No evidence they share a single broken code path.", "right": "issue:41628"}, {"accept": false, "left": "issue:44291", "reason": "Both involve regressions around loading/configuration, but one is an unexpected constructor argument in init_empty_weights and the other is PretrainedConfig type checking. 
Distinct bugs.", "right": "issue:45071"}, {"accept": false, "left": "issue:44117", "reason": "TOKENIZER_MAPPING_NAMES returning None is unrelated to the Mistral regex patch crashing on a backend_tokenizer attribute access.", "right": "issue:45081"}, {"accept": false, "left": "issue:42673", "reason": "VRAM leak in multi-threaded dataloader use is unrelated to Blip2 dtype fields remaining float32 after loading.", "right": "issue:43577"}, {"accept": false, "left": "issue:44829", "reason": "Flash-attention-3 training degeneration and Qwen3.5 save_pretrained checkpoint corruption are different behaviors with different affected code paths.", "right": "issue:45216"}, {"accept": false, "left": "issue:44410", "reason": "Missing Qwen3next projections in layer 0 is not the same as a Qwen3.5 MoE from_pretrained error.", "right": "issue:45310"}, {"accept": false, "left": "issue:38617", "reason": "ImportError for layer_type_validation is unrelated to _torch_extract_fbank_features() behavior.", "right": "issue:44220"}, {"accept": false, "left": "issue:42673", "reason": "A VRAM leak in Qwen3ForCausalLM threads does not match a TypeError in caching_allocator_warmup on Apple Silicon.", "right": "issue:43582"}], "summary": "These items are thematically similar in places (imports, Qwen3, tokenizers, regressions), but the soft pairs describe different failure modes and code paths. 
No pair looks like the same underlying bug or fix."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:39401|issue:43421", "issue:43450|issue:43986", "issue:38617|issue:41628", "issue:44291|issue:45071", "issue:44117|issue:45081", "issue:42673|issue:43577", "issue:44829|issue:45216", "issue:44410|issue:45310", "issue:38617|issue:44220", "issue:42673|issue:43582"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6856, "estimated_input_tokens": 3300, "item_count": 18, "node_count": 18, "serialized_chars": 13200, "soft_pair_count": 10}, "cached_at": "2026-04-14T16:39:05Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "f50513617cf63fbf6440bbf6462ed9af8c3544fc7722f46d3b0b670cfcb11248", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:41628", "issue:42175", "issue:42222", "issue:42371", "issue:42617", "issue:43066", "issue:43122", "issue:43408", "issue:43673", "issue:43742", "issue:43784", "issue:43937", "issue:44261", "issue:44496", "issue:44933", "issue:44964", "issue:45278", "issue:45341"], "result": {"analyst_result": {"best_issue_reason": "Issue 45278 is the best single issue to keep as the cluster representative because it is open, recent, and broadly phrased around upgrade-triggered import regressions, which is the closest shared pattern among the items.", "best_pr_reason": null, "canonical_issue_reason": "Issue 45278 is the broadest umbrella for the cluster: it describes multiple import failures after upgrading to 5.5.0 and best matches the recurring theme of version-related breakages. 
It is still not a true duplicate of every item, but it is the most representative issue in the set.", "canonical_pr_reason": null, "confidence": 0.43, "soft_edge_verdicts": [{"accept": false, "left": "issue:42175", "reason": "Different problems: missing TensorFlow backend in an install extra vs broken ViTPose model files/import path. Same project area, but not the same bug.", "right": "issue:42222"}, {"accept": false, "left": "issue:41628", "reason": "Both are import-related, but one is a missing exported class and the other is a NameError inside sentence-transformers integration. Different failure modes and code paths.", "right": "issue:43784"}, {"accept": false, "left": "issue:42371", "reason": "Unrelated: TF32 API settings warning versus MLA config precision issue. No shared underlying defect.", "right": "issue:44261"}, {"accept": false, "left": "issue:43937", "reason": "Both concern model/config validation, but one is an invalid GenerationConfig for GLM-5 and the other is an unrecognized model type for a different checkpoint. Different root causes.", "right": "issue:44496"}, {"accept": false, "left": "issue:44933", "reason": "The latter is a broad report about many import errors after an upgrade, while the former is a specific missing import from image_utils. Too general vs too specific to be the same bug.", "right": "issue:45278"}, {"accept": false, "left": "issue:43408", "reason": "Different model families and different symptoms: a sam3_video/sam3_tracker warning versus a KeyError loading MobileLLM. Not the same issue.", "right": "issue:43742"}, {"accept": false, "left": "issue:42617", "reason": "A runtime problem running a script is unrelated to a small testing_utils bug. No concrete overlap.", "right": "issue:45341"}, {"accept": false, "left": "issue:43066", "reason": "Both are tokenizer/v5 regressions, but one is about decoder type metadata and the other about changed tokenization output. 
Related area, but not clearly the same bug or fix.", "right": "issue:43122"}, {"accept": false, "left": "issue:44964", "reason": "Loading a specific multimodal model failing is not the same as a general multi-import regression report, even if both occurred after upgrades.", "right": "issue:45278"}, {"accept": false, "left": "issue:43673", "reason": "Generation cache/chunked_prefill behavior is unrelated to a testing_utils bug.", "right": "issue:45341"}], "summary": "This cluster is mostly a mix of unrelated Transformers regressions: import errors, model-loading/config issues, tokenizer behavior changes, and a few standalone bug reports. The only partial thematic overlap is around v5 tokenizer/import regressions, but the pairs are still too different to merge as duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:42175|issue:42222", "issue:41628|issue:43784", "issue:42371|issue:44261", "issue:43937|issue:44496", "issue:44933|issue:45278", "issue:43408|issue:43742", "issue:42617|issue:45341", "issue:43066|issue:43122", "issue:44964|issue:45278", "issue:43673|issue:45341"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6756, "estimated_input_tokens": 3250, "item_count": 18, "node_count": 18, "serialized_chars": 12998, "soft_pair_count": 9}, "cached_at": "2026-04-14T16:39:32Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "a02ea3526310c6e5aef86898c89fbfc753ceac815e5f46ff7ef26a7474de0c5a", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:41628", "issue:42222", "issue:42994", "issue:43012", "issue:43097", "issue:43408", "issue:43441", "issue:43874", "issue:43986", 
"issue:44261", "issue:44263", "issue:44484", "issue:44485", "issue:44493", "issue:44829", "issue:45003", "issue:45071", "issue:45341"], "result": {"analyst_result": {"best_issue_reason": "Issue 45071 is the broadest framework-level regression in the set, but it is still not representative of the other issues.", "best_pr_reason": "No pull requests are present in the cluster.", "canonical_issue_reason": "No strong canonical issue: the set spans unrelated regressions in image processors, GLM internals, attention behavior, config typing, saving, and testing utilities.", "canonical_pr_reason": "No pull requests are present in the cluster.", "confidence": 0.93, "soft_edge_verdicts": [{"accept": false, "left": "issue:43097", "reason": "Both mention model/config compatibility, but one is about a removed tie_embeddings API and the other about unexpected position-id keys; different failure modes and code paths.", "right": "issue:44493"}, {"accept": false, "left": "issue:43874", "reason": "GLM-related titles, but one is an ImageProcessorFast missing method causing token counting failure and the other is a missing rms_norm_eps config value causing precision error.", "right": "issue:44261"}, {"accept": false, "left": "issue:43986", "reason": "One is a torchvision/AutoProcessor loading crash for video models; the other is a question about save_pretrained shard sizing. 
No shared underlying bug.", "right": "issue:44484"}, {"accept": false, "left": "issue:43441", "reason": "Both mention FlashAttention, but one is a Ministral-3 v5 RC failure and the other is degenerate training with flash_attention_3 in sequence classification; not the same concrete issue.", "right": "issue:44829"}, {"accept": false, "left": "issue:42222", "reason": "VitPose model breakage and quantized model saving failure are unrelated.", "right": "issue:42994"}, {"accept": false, "left": "issue:44263", "reason": "Both are GLM issues, but one is a torch.split return-value problem in a DSA indexer and the other is RoPE implementation discussion; different bug surfaces.", "right": "issue:44485"}, {"accept": false, "left": "issue:43012", "reason": "A bfloat16 compile warning and a sam3_video vs sam3_tracker model-type warning are unrelated.", "right": "issue:43408"}, {"accept": false, "left": "issue:41628", "reason": "Both are API/regression complaints, but one is AutoImageProcessor import failure and the other is PretrainedConfig type-checking breakage; different affected interfaces.", "right": "issue:45071"}, {"accept": false, "left": "issue:45003", "reason": "Modeling-utils sys.modules access and a testing_utils bug are separate utility-layer issues with no clear shared root cause.", "right": "issue:45341"}], "summary": "This cluster is heterogeneous: the paired issues share superficial wording or a broad subsystem, but they describe different bugs, code paths, or model families. 
None of the soft edges look like true duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43097|issue:44493", "issue:43874|issue:44261", "issue:43986|issue:44484", "issue:43441|issue:44829", "issue:42222|issue:42994", "issue:44263|issue:44485", "issue:43012|issue:43408", "issue:41628|issue:45071", "issue:45003|issue:45341"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6882, "estimated_input_tokens": 3313, "item_count": 18, "node_count": 18, "serialized_chars": 13252, "soft_pair_count": 9}, "cached_at": "2026-04-14T16:40:05Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "fc684b539b3ee6d2a39f3832b82f9a29114976d1e0f447ac00e290fa1180dd4b", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:42222", "issue:43097", "issue:43335", "issue:43408", "issue:43452", "issue:43784", "issue:43874", "issue:43931", "issue:43976", "issue:43986", "issue:44117", "issue:44263", "issue:44368", "issue:44410", "issue:44496", "issue:44987", "issue:45071", "issue:45341"], "result": {"analyst_result": {"best_issue_reason": "No single issue is a strong global representative; the items are too diverse for one issue to stand in for the cluster.", "best_pr_reason": "No pull requests are present, so there is no best PR candidate.", "canonical_issue_reason": "No canonical issue: the set is heterogeneous and does not converge on one underlying bug or code path.", "canonical_pr_reason": "No pull requests are present in this cluster.", "confidence": 0.89, "soft_edge_verdicts": [{"accept": false, "left": "issue:43784", "reason": "Different problems: one is a Sentence-Transformers import NameError, the 
other is a Python-version compatibility regression in Transformers 5.1.0.", "right": "issue:43976"}, {"accept": false, "left": "issue:43097", "reason": "Unrelated bugs: removal of `tie_embeddings_and_encoder_decoder` is a config/API issue, while the other is GGUF loading for tokenizer/model classes.", "right": "issue:43452"}, {"accept": false, "left": "issue:42222", "reason": "Different model families and failures: ViTPose import/breakage vs. an unrecognized OLMo-Hybrid model config.", "right": "issue:44496"}, {"accept": false, "left": "issue:44117", "reason": "Both concern loading, but the concrete failures differ: tokenizer mapping returning None vs. a specific model load failure in `physical-intelligence/fast`.", "right": "issue:44987"}, {"accept": false, "left": "issue:43931", "reason": "Different multimodal/model-shape issues: Qwen3-VL weight mismatch vs. qwen3next missing attention projections in layer 0.", "right": "issue:44410"}, {"accept": false, "left": "issue:43986", "reason": "A video-model AutoProcessor crash without torchvision is unrelated to a small testing_utils bug.", "right": "issue:45341"}, {"accept": false, "left": "issue:43874", "reason": "Different code paths: a missing image-patch method causing multimodal token count errors vs. a torch.split return-value issue in GlmMoeDsaIndexer.", "right": "issue:44263"}, {"accept": false, "left": "issue:43408", "reason": "The titles mention warnings in similar areas, but one is a sam3 model-type mismatch and the other is a tie_word_embeddings warning for Qwen3.5 fine-tuning.", "right": "issue:44368"}, {"accept": false, "left": "issue:43335", "reason": "Both are config-related, but one is a sparse-layer creation bug in SwitchTransformers and the other is a broader PretrainedConfig type-checking regression.", "right": "issue:45071"}], "summary": "These reports span several unrelated Transformers bugs (model loading, config typing, tokenizer mappings, multimodal processors, and package compatibility). 
None of the soft-similarity pairs appear to describe the same underlying defect, so all candidate duplicate links should be rejected."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43784|issue:43976", "issue:43097|issue:43452", "issue:42222|issue:44496", "issue:44117|issue:44987", "issue:43931|issue:44410", "issue:43986|issue:45341", "issue:43874|issue:44263", "issue:43408|issue:44368", "issue:43335|issue:45071"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6528, "estimated_input_tokens": 3136, "item_count": 17, "node_count": 17, "serialized_chars": 12542, "soft_pair_count": 9}, "cached_at": "2026-04-14T16:40:33Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "ba28a84d3289e3494dd05cd398f3999d0ac8e8fa8d38297505cd4e2e11a6e184", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:38617", "issue:43064", "issue:43531", "issue:43931", "issue:43976", "issue:43986", "issue:44038", "issue:44261", "issue:44315", "issue:44351", "issue:44464", "issue:44485", "issue:44829", "issue:45071", "issue:45245", "issue:45341", "issue:45372"], "result": {"analyst_result": {"best_issue_reason": "No issue is a strong global representative because none of the soft links point to the same underlying bug; choosing any one as canonical would be misleading.", "best_pr_reason": null, "canonical_issue_reason": "No single canonical issue fits: the items span unrelated bugs across dependency imports, model-specific behavior, training/runtime configuration, and test utilities.", "canonical_pr_reason": null, "confidence": 0.94, "soft_edge_verdicts": [{"accept": false, "left": "issue:43986", "reason": "Both 
are import/processor-loading failures, but they involve different missing symbols and different dependency paths: torchvision absence vs mistral_common's ReasoningEffort.", "right": "issue:45372"}, {"accept": false, "left": "issue:43931", "reason": "Both mention Qwen3-VL/MoE models, but one is a sliding_window behavior issue and the other is a broader Transformers 5.0 compatibility report; different concrete failures.", "right": "issue:44038"}, {"accept": false, "left": "issue:44829", "reason": "These are unrelated runtime bugs: one is degenerate training with flash_attention_3, the other is a categorical-cardinality limit error.", "right": "issue:45245"}, {"accept": false, "left": "issue:43976", "reason": "One reports a Python version compatibility break, the other a PretrainedConfig type-checking regression; same release family, different underlying issue.", "right": "issue:45071"}, {"accept": false, "left": "issue:43064", "reason": "Different training/integration bugs: FSDP2+PEFT+cpu_ram_efficient_loading optimizer-state corruption versus Liger Kernel not being applied with model_init.", "right": "issue:44315"}, {"accept": false, "left": "issue:44261", "reason": "Both touch model internals, but one is an rms_norm_eps precision/config omission and the other is a RoPE implementation discussion; not the same bug.", "right": "issue:44485"}, {"accept": false, "left": "issue:44464", "reason": "Chunked generation inconsistency with compiled forward is a runtime-generation bug, while v5.4.0 PretrainedConfig type checking is a separate API regression.", "right": "issue:45071"}, {"accept": false, "left": "issue:43531", "reason": "These are completely different subsystems: a Qwen3-MoE sliding_window bug versus a testing_utils bug.", "right": "issue:45341"}, {"accept": false, "left": "issue:38617", "reason": "Both are import errors, but they concern different exported names and different code paths, so they are not the same defect.", "right": "issue:44351"}], "summary": 
"This cluster is a loose thematic grouping of unrelated Hugging Face Transformers bugs; the soft pairs only share broad symptoms like import/model-loading/training issues, not the same concrete defect or code path."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43986|issue:45372", "issue:43931|issue:44038", "issue:44829|issue:45245", "issue:43976|issue:45071", "issue:43064|issue:44315", "issue:44261|issue:44485", "issue:44464|issue:45071", "issue:43531|issue:45341", "issue:38617|issue:44351"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6658, "estimated_input_tokens": 3201, "item_count": 17, "node_count": 17, "serialized_chars": 12802, "soft_pair_count": 10}, "cached_at": "2026-04-14T16:40:58Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "7e907c679ecb8127cbfd23253c45a679a64472555ab70b4c10919e9dd1a6bda4", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:38617", "issue:43066", "issue:43097", "issue:43502", "issue:43824", "issue:43906", "issue:43986", "issue:43992", "issue:44038", "issue:44062", "issue:44117", "issue:44410", "issue:44704", "issue:44964", "issue:45103", "issue:45341", "issue:45375"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.94, "soft_edge_verdicts": [{"accept": false, "left": "issue:44704", "reason": "Different failure modes in different code paths: cached_file kwarg forwarding vs auto_docstring annotation handling.", "right": "issue:45103"}, {"accept": false, "left": "issue:44062", "reason": "Tokenizer AddedToken construction bug is 
unrelated to TOKENIZER_MAPPING_NAMES returning None during from_pretrained.", "right": "issue:44117"}, {"accept": false, "left": "issue:44410", "reason": "Both are Qwen-related, but one reports missing layer projections and the other a config field being dropped by @strict; distinct bugs.", "right": "issue:45375"}, {"accept": false, "left": "issue:43502", "reason": "local_files_only network requests and missing embed_tokens.weight during UMT5Encoder loading are separate issues.", "right": "issue:43992"}, {"accept": false, "left": "issue:43986", "reason": "AutoProcessor/video torchvision crash is unrelated to tokenizer mapping assumptions.", "right": "issue:44117"}, {"accept": false, "left": "issue:38617", "reason": "ImportError for layer_type_validation and removal of tie_embeddings_and_encoder_decoder are different API regressions.", "right": "issue:43097"}, {"accept": false, "left": "issue:44038", "reason": "Different model families and different symptoms: Qwen3-VL-Moe bug vs qwen3next missing projections.", "right": "issue:44410"}, {"accept": false, "left": "issue:44964", "reason": "Model-loading failure for Phi-4-multimodal is unrelated to a testing_utils bug.", "right": "issue:45341"}, {"accept": false, "left": "issue:43066", "reason": "Wrong tokenizer decoder type in v5 is not the same as an isolated reproduction of issue 38071.", "right": "issue:43906"}, {"accept": false, "left": "issue:43824", "reason": "ImportError for Qwen2_5_VLForConditionalGeneration is a different problem from AutoProcessor crashing without torchvision.", "right": "issue:43986"}], "summary": "This cluster is heterogeneous: the items span unrelated import errors, tokenizer/config regressions, model-specific loading issues, and utility bugs. 
None of the soft pairs look like the same underlying bug/change, so I would not merge any of them as duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44704|issue:45103", "issue:44062|issue:44117", "issue:44410|issue:45375", "issue:43502|issue:43992", "issue:43986|issue:44117", "issue:38617|issue:43097", "issue:44038|issue:44410", "issue:44964|issue:45341", "issue:43066|issue:43906", "issue:43824|issue:43986"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6708, "estimated_input_tokens": 3226, "item_count": 17, "node_count": 17, "serialized_chars": 12903, "soft_pair_count": 10}, "cached_at": "2026-04-14T16:41:27Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "5ce85295f0e54391d02fec4f80439bd2b5932b125d17196f632d1d1aaf5f60a4", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:43012", "issue:43097", "issue:43299", "issue:43352", "issue:43644", "issue:43825", "issue:43873", "issue:43901", "issue:43957", "issue:43992", "issue:44351", "issue:44368", "issue:44380", "issue:44410", "issue:44484", "issue:44655", "issue:44829"], "result": {"analyst_result": {"best_issue_reason": "No issue is a strong global representative; the closest matches are still about different models, APIs, or failure modes.", "best_pr_reason": "No pull requests are present in the cluster.", "canonical_issue_reason": "No single issue is a clean canonical for this cluster because the items cover distinct bugs and feature/docs regressions across different code paths.", "canonical_pr_reason": "No pull requests are present in the cluster.", "confidence": 0.95, "soft_edge_verdicts": [{"accept": false, 
"left": "issue:43957", "reason": "Different problems: meta-device model loading breakage vs an import failure for HybridCache. No shared code-path bug is evident.", "right": "issue:44351"}, {"accept": false, "left": "issue:43992", "reason": "UMT5Encoder missing embed_tokens.weight is a checkpoint-loading issue; max_shard_size in save_pretrained is a serialization/sharding question.", "right": "issue:44484"}, {"accept": false, "left": "issue:43097", "reason": "Removed tie_embeddings_and_encoder_decoder is a config/API change; offloading with quantization is a runtime memory/offload behavior issue.", "right": "issue:43873"}, {"accept": false, "left": "issue:43012", "reason": "Both involve training/model execution, but one is a bfloat16 compile warning and the other is degenerate training with flash_attention_3; different concrete failures.", "right": "issue:44829"}, {"accept": false, "left": "issue:43299", "reason": "Both mention Qwen, but one is a Qwen3VL MoE loading regression and the other is missing layer projections in qwen3next; not the same bug.", "right": "issue:44410"}, {"accept": false, "left": "issue:43352", "reason": "NemotronHForCausalLM lacking Flash Attention 2 support is model-specific; GPT2 attention scaling ignored under SDPA/FlashAttention is a separate backend/config bug.", "right": "issue:44380"}, {"accept": false, "left": "issue:43825", "reason": "Pipeline translation-task messaging is a docs/error-message issue, while save_pretrained on Pipeline objects is a persistence bug.", "right": "issue:44655"}, {"accept": false, "left": "issue:43644", "reason": "Non-persistent buffers filled with junk is a model initialization/state bug; max_shard_size in save_pretrained concerns checkpoint sharding limits.", "right": "issue:44484"}, {"accept": false, "left": "issue:43097", "reason": "A removed config flag is unrelated to docs still mentioning return_all_scores; one is behavior/API removal, the other is documentation drift.", "right": "issue:43901"}, 
{"accept": false, "left": "issue:43012", "reason": "Both mention warnings during model use, but they concern different configs and symptoms: bfloat16 compilation precision warning vs tie_word_embeddings warning.", "right": "issue:44368"}], "summary": "The cluster is not a single duplicate set; the soft pairs mostly connect unrelated model-loading, config, warning, docs, and save/load issues. No soft edge should be merged."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43957|issue:44351", "issue:43992|issue:44484", "issue:43097|issue:43873", "issue:43012|issue:44829", "issue:43299|issue:44410", "issue:43352|issue:44380", "issue:43825|issue:44655", "issue:43644|issue:44484", "issue:43097|issue:43901", "issue:43012|issue:44368"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6472, "estimated_input_tokens": 3108, "item_count": 17, "node_count": 17, "serialized_chars": 12430, "soft_pair_count": 9}, "cached_at": "2026-04-14T16:42:18Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "863efd8009f06a021f4db9d648773d67b67c7bd78a7e4f5c63e1df923fbc8611", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:42886", "issue:42907", "issue:43012", "issue:43064", "issue:43208", "issue:43352", "issue:43381", "issue:43525", "issue:43526", "issue:43550", "issue:43618", "issue:43937", "issue:43976", "issue:44117", "issue:44380", "issue:44655", "issue:44792"], "result": {"analyst_result": {"best_issue_reason": "No global best issue for deduplication purposes; none of these issues are duplicates or close enough to consolidate into one canonical report.", "best_pr_reason": null, 
"canonical_issue_reason": "No single canonical issue stands out because this cluster is heterogeneous: the issues span offline cache loading, model saving, training/FSDP, tokenizer mappings, attention configs, image processing, and test failures.", "canonical_pr_reason": null, "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "issue:42886", "reason": "Offline cache tokenizer loading and gradient-checkpointing-in-eval are unrelated failure modes in different code paths.", "right": "issue:43381"}, {"accept": false, "left": "issue:43937", "reason": "Invalid GenerationConfig for GLM-5 is unrelated to a Janus image-generation test failure.", "right": "issue:44792"}, {"accept": false, "left": "issue:42907", "reason": "Saving dequantized Ministrals/Devstrals and saving Pipeline objects are different save_pretrained problems with different targets.", "right": "issue:44655"}, {"accept": false, "left": "issue:43525", "reason": "A missing pad_token_id on Llama4Config is distinct from TOKENIZER_MAPPING_NAMES sometimes being None.", "right": "issue:44117"}, {"accept": false, "left": "issue:43012", "reason": "A bfloat16 compile warning is not the same as a Bamba-9B-v2 torch.compile SDPA correctness failure.", "right": "issue:43550"}, {"accept": false, "left": "issue:43352", "reason": "Nemotron Flash Attention 2 support and Python 3.9/3.10 compatibility are unrelated issues.", "right": "issue:43976"}, {"accept": false, "left": "issue:43208", "reason": "xLSTM training bugs and BeitImageProcessorFast label reduction are separate model/component bugs.", "right": "issue:43526"}, {"accept": false, "left": "issue:43064", "reason": "FSDP2/PEFT optimizer-state corruption and GPT2 attention scaling with SDPA/FlashAttention are different bugs in different training/inference paths.", "right": "issue:44380"}, {"accept": false, "left": "issue:43618", "reason": "CLIPOutput attentions regression is unrelated to GPT2 attention scaling behavior under SDPA/FlashAttention.", 
"right": "issue:44380"}], "summary": "All soft-similarity pairs are false positives; the items cover unrelated bugs, features, and subsystems rather than the same underlying issue."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:42886|issue:43381", "issue:43937|issue:44792", "issue:42907|issue:44655", "issue:43525|issue:44117", "issue:43012|issue:43550", "issue:43352|issue:43976", "issue:43208|issue:43526", "issue:43064|issue:44380", "issue:43618|issue:44380"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6894, "estimated_input_tokens": 3319, "item_count": 18, "node_count": 18, "serialized_chars": 13275, "soft_pair_count": 10}, "cached_at": "2026-04-14T16:42:58Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "b8862a0739213992eee0688790d0c83b661e5249a4db4a411fd292efc8106eca", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:36683", "issue:38617", "issue:43012", "issue:43299", "issue:43502", "issue:43582", "issue:43618", "issue:43756", "issue:43761", "issue:43867", "issue:44038", "issue:44230", "issue:44483", "issue:44485", "issue:44514", "issue:44683", "issue:45290", "issue:45381"], "result": {"analyst_result": {"best_issue_reason": "issue:43502 is the cleanest standalone bug report for triage: it has a specific incorrect behavior (`local_files_only=True` still triggering network requests) and a straightforward expected-vs-actual contract.", "best_pr_reason": null, "canonical_issue_reason": "issue:44483 is the broadest and most user-facing regression report in the set, with a clear broken endpoint and high severity, so it works best as the cluster anchor.", 
"canonical_pr_reason": null, "confidence": 0.79, "soft_edge_verdicts": [{"accept": false, "left": "issue:44483", "reason": "Both involve chat/template handling, but one is an API request acceptance failure and the other is a batched apply_chat_template crash; different code paths and symptoms.", "right": "issue:44514"}, {"accept": false, "left": "issue:36683", "reason": "Completely different subsystems: Gemma3 config attribute lookup vs Apple Silicon allocator warmup TypeError.", "right": "issue:43582"}, {"accept": false, "left": "issue:43618", "reason": "Both are CLIP regressions, but one is missing attentions assignment and the other is hidden_states being None in CLIPVisionModel.forward; not the same bug.", "right": "issue:43761"}, {"accept": false, "left": "issue:43012", "reason": "A PyTorch precision warning during bfloat16 compilation is unrelated to compiled flex_attention failing on torch>=2.9.", "right": "issue:44683"}, {"accept": false, "left": "issue:43756", "reason": "Both mention RoPE, but they concern different models and different implementation issues; no shared failing code path is evident.", "right": "issue:44485"}, {"accept": false, "left": "issue:44483", "reason": "Both touch chat-related APIs, but one is request validation for /v1/chat/completions and the other is tokenization crashing on assistant tool-call messages.", "right": "issue:45290"}, {"accept": false, "left": "issue:38617", "reason": "Importing a removed symbol from configuration_utils is unrelated to local_files_only still allowing network calls.", "right": "issue:43502"}, {"accept": false, "left": "issue:44483", "reason": "Chat request acceptance and Qwen2.5-VL video position_ids are different failures in different parts of the stack.", "right": "issue:45381"}, {"accept": false, "left": "issue:43299", "reason": "Qwen3-VL-Moe loading breakage and a sorted-state_dict loading error are not the same underlying defect.", "right": "issue:43867"}, {"accept": false, "left": "issue:44038", 
"reason": "Both mention Qwen3-VL/Moe, but one is a version breakage and the other is fp8 inference support; they are distinct changes/issues.", "right": "issue:44230"}], "summary": "This cluster is heterogeneous: the soft pairs share broad themes (Qwen/VL, chat templates, RoPE, CLIP, loading, warnings) but each reports a different concrete bug or regression. I do not see any true duplicates among the proposed pairs."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44483|issue:44514", "issue:36683|issue:43582", "issue:43618|issue:43761", "issue:43012|issue:44683", "issue:43756|issue:44485", "issue:44483|issue:45290", "issue:38617|issue:43502", "issue:44483|issue:45381", "issue:43299|issue:43867", "issue:44038|issue:44230"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6594, "estimated_input_tokens": 3169, "item_count": 17, "node_count": 17, "serialized_chars": 12673, "soft_pair_count": 10}, "cached_at": "2026-04-14T16:43:34Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "ed36425636190bbe5c4bb2ee56d13c88396d681cee26f56951cbd7959f7a888e", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:36683", "issue:42994", "issue:43064", "issue:43065", "issue:43066", "issue:43352", "issue:43475", "issue:43867", "issue:43901", "issue:43937", "issue:44077", "issue:44220", "issue:44483", "issue:44492", "issue:44655", "issue:45216", "issue:45356"], "result": {"analyst_result": {"best_issue_reason": "issue:44655 has the widest scope and clearest user-facing serialization problem, making it the most useful representative issue for triage among these disparate reports.", "best_pr_reason": null, 
"canonical_issue_reason": "issue:44655 is the broadest and most representative report in the set, centered on save_pretrained/pipeline serialization behavior. It is the best single issue to anchor the cluster, even though it is not a duplicate of the other save/load-related reports.", "canonical_pr_reason": null, "confidence": 0.89, "soft_edge_verdicts": [{"accept": false, "left": "issue:43901", "reason": "Docs mismatch for TextClassificationPipeline return_all_scores is a documentation/API behavior issue, not the same as Pipeline save_pretrained failing.", "right": "issue:44655"}, {"accept": false, "left": "issue:42994", "reason": "Both involve saving, but one is about quantized model saving and the other about Pipeline object serialization; different failure modes and code paths.", "right": "issue:44655"}, {"accept": false, "left": "issue:43065", "reason": "Sam3PixelDecoder dummy Conv2d and a v5 chat/completions request rejection are unrelated subsystems and bugs.", "right": "issue:44483"}, {"accept": false, "left": "issue:36683", "reason": "Gemma3Config missing vocab_size and Sam3VisionEncoderOutput missing fpn_position_embeddings are distinct model-specific attribute errors.", "right": "issue:43475"}, {"accept": false, "left": "issue:43867", "reason": "State_dict sorting causing load errors and a Qwen3.5 save_pretrained regression are both loading/saving themed, but they describe different concrete bugs and likely different fixes.", "right": "issue:45216"}, {"accept": false, "left": "issue:43352", "reason": "Flash Attention 2 support for NemotronHForCausalLM and GLM-5 GenerationConfig validation are separate model/feature issues.", "right": "issue:43937"}, {"accept": false, "left": "issue:44220", "reason": "_torch_extract_fbank_features() is an audio feature extraction bug, not a Pipeline.save_pretrained serialization bug.", "right": "issue:44655"}, {"accept": false, "left": "issue:43066", "reason": "Tokenizer decoder type in Transformers v5 and Kimi-K2.5 
codec/fix_mistral_regex behavior are tokenizer-related but not the same underlying problem.", "right": "issue:45356"}, {"accept": false, "left": "issue:43064", "reason": "FSDP2/PEFT optimizer-state corruption and a cache-strategy typo are completely unrelated.", "right": "issue:44492"}, {"accept": false, "left": "issue:36683", "reason": "Gemma3Config missing vocab_size and patchtsmixer post_init policy are unrelated model/config issues.", "right": "issue:44077"}], "summary": "This cluster is mostly a set of unrelated issue reports that only share broad vocabulary around saving/loading, tokenizers, or model/runtime errors. No soft pair looks like the same underlying bug strongly enough to merge, so all soft edges are rejected."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43901|issue:44655", "issue:42994|issue:44655", "issue:43065|issue:44483", "issue:36683|issue:43475", "issue:43867|issue:45216", "issue:43352|issue:43937", "issue:44220|issue:44655", "issue:43066|issue:45356", "issue:43064|issue:44492", "issue:36683|issue:44077"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7024, "estimated_input_tokens": 3384, "item_count": 18, "node_count": 18, "serialized_chars": 13536, "soft_pair_count": 11}, "cached_at": "2026-04-14T16:44:33Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "c9fc8659d32233059f8812e9cecd867bd83da10000a5fd10bbf3092c769845eb", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:36683", "issue:41669", "issue:43010", "issue:43352", "issue:43441", "issue:43525", "issue:43618", "issue:43976", "issue:43992", "issue:44077", "issue:44188", "issue:44242", 
"issue:44246", "issue:44380", "issue:44938", "issue:45216", "issue:45278", "issue:45310"], "result": {"analyst_result": {"best_issue_reason": "`issue:44246` is the best representative issue because it states the general symptom cleanly and can subsume the more specific optimization proposal in `issue:41669`.", "best_pr_reason": null, "canonical_issue_reason": "`issue:44246` is the broadest user-facing report in the only apparent duplicate pair, and it matches the import-time slowdown symptom directly.", "canonical_pr_reason": null, "confidence": 0.77, "soft_edge_verdicts": [{"accept": false, "left": "issue:43992", "reason": "Both are `from_pretrained` loading failures, but they affect different model families and look like separate weight-mapping bugs.", "right": "issue:45310"}, {"accept": false, "left": "issue:44077", "reason": "`post_init`/API contract enforcement is unrelated to GPT2 attention scaling under SDPA/FlashAttention.", "right": "issue:44380"}, {"accept": false, "left": "issue:36683", "reason": "Same error shape, but different missing config attributes on different models; likely separate model-specific regressions.", "right": "issue:43525"}, {"accept": false, "left": "issue:43976", "reason": "Both are Python-version compatibility reports, but they target different versions and failure modes, not the same bug.", "right": "issue:44938"}, {"accept": false, "left": "issue:44188", "reason": "Both mention attention kernels, but one is a torch.compile branching divergence and the other is ignored scaling config; different code paths.", "right": "issue:44380"}, {"accept": false, "left": "issue:43618", "reason": "CLIP attentions assignment and Qwen3.5 checkpoint saving are unrelated behaviors.", "right": "issue:45216"}, {"accept": false, "left": "issue:36683", "reason": "A missing config attribute and missing load-balancing loss are unrelated issues.", "right": "issue:44242"}, {"accept": false, "left": "issue:43010", "reason": "`@torch.no_grad` on cache/layer 
updates is unrelated to general import errors after upgrading Transformers.", "right": "issue:45278"}, {"accept": false, "left": "issue:43441", "reason": "Different models and different FlashAttention problems; they are not the same concrete bug.", "right": "issue:44380"}, {"accept": true, "left": "issue:41669", "reason": "Both describe the same underlying problem: `import transformers` is slow, and 41669 proposes the import-time cleanup that addresses that symptom.", "right": "issue:44246"}, {"accept": false, "left": "issue:36683", "reason": "Different model-specific failures: missing `vocab_size` vs unsupported Flash Attention 2.0.", "right": "issue:43352"}], "summary": "This cluster is mostly heterogeneous Transformers bug reports. The only likely duplicate-like pair is the import slowdown reports (`remove import * usage...` vs `import transformers takes long sometimes`); the rest are distinct model/config/version regressions."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43992|issue:45310", "issue:44077|issue:44380", "issue:36683|issue:43525", "issue:43976|issue:44938", "issue:44188|issue:44380", "issue:43618|issue:45216", "issue:36683|issue:44242", "issue:43010|issue:45278", "issue:43441|issue:44380", "issue:41669|issue:44246", "issue:36683|issue:43352"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7060, "estimated_input_tokens": 3402, "item_count": 18, "node_count": 18, "serialized_chars": 13606, "soft_pair_count": 11}, "cached_at": "2026-04-14T16:45:06Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "183e39f2cf235dc653d2777437e829c0e0ae2bdf19fbc38b5afadaf3f65202d2", 
"prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:36683", "issue:41669", "issue:43010", "issue:43064", "issue:43352", "issue:43618", "issue:43644", "issue:43867", "issue:43931", "issue:43976", "issue:44077", "issue:44484", "issue:44554", "issue:44938", "issue:45071", "issue:45357", "issue:45375", "issue:45381"], "result": {"analyst_result": {"best_issue_reason": "issue:45071 is the strongest standalone representative because it has the widest apparent impact and the highest cross-references among the listed issues.", "best_pr_reason": null, "canonical_issue_reason": "issue:45071 is the broadest, most centrally referenced regression in the set and best represents the general v5 compatibility break theme, even though the cluster is heterogeneous.", "canonical_pr_reason": null, "confidence": 0.46, "soft_edge_verdicts": [{"accept": false, "left": "issue:43352", "reason": "Different models and different failures: unsupported Flash Attention 2.0 for Nemotron vs a Qwen3-VL weight-shape mismatch during loading.", "right": "issue:43931"}, {"accept": false, "left": "issue:43867", "reason": "One is a state_dict loading/sorting error; the other is a missing config field being dropped by strict parsing. 
Different code paths.", "right": "issue:45375"}, {"accept": false, "left": "issue:36683", "reason": "Gemma3Config missing an attribute is a config/schema bug, while the FSDP2+PEFT report is a distributed training optimizer-state issue.", "right": "issue:43064"}, {"accept": false, "left": "issue:44938", "reason": "Both are version-related regressions, but one is a Python 3.14 load failure and the other is PretrainedConfig type-checking breakage; not the same bug.", "right": "issue:45071"}, {"accept": false, "left": "issue:43867", "reason": "Loading a sorted state_dict and saving incorrect visual encoder keys are opposite directions and different underlying defects.", "right": "issue:45357"}, {"accept": false, "left": "issue:43010", "reason": "Cache/layer update no_grad decoration is unrelated to Python 3.9+ vs 3.10+ compatibility.", "right": "issue:43976"}, {"accept": false, "left": "issue:44938", "reason": "Python 3.14 import/load failure and Qwen2.5-VL video position-id errors are different model/runtime issues.", "right": "issue:45381"}, {"accept": false, "left": "issue:43618", "reason": "Missing attentions assignment in CLIPOutput is unrelated to patchtsmixer post_init validation policy.", "right": "issue:44077"}, {"accept": false, "left": "issue:41669", "reason": "Import-star cleanup/performance discussion is unrelated to the MPS attention correctness bug.", "right": "issue:44554"}, {"accept": false, "left": "issue:43644", "reason": "Non-persistent buffer initialization corruption in v5 is a different regression from Python 3.14 load failure.", "right": "issue:44938"}, {"accept": false, "left": "issue:43867", "reason": "A state_dict loading error and a question about default max_shard_size in save_pretrained are not the same underlying issue.", "right": "issue:44484"}], "summary": "This cluster is mostly a grab bag of unrelated Transformers regressions and feature issues. 
The only loose common theme is broad v5-era compatibility/loading behavior, but none of the soft pairs look like the same underlying bug or change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43352|issue:43931", "issue:43867|issue:45375", "issue:36683|issue:43064", "issue:44938|issue:45071", "issue:43867|issue:45357", "issue:43010|issue:43976", "issue:44938|issue:45381", "issue:43618|issue:44077", "issue:41669|issue:44554", "issue:43644|issue:44938", "issue:43867|issue:44484"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6998, "estimated_input_tokens": 3371, "item_count": 18, "node_count": 18, "serialized_chars": 13483, "soft_pair_count": 11}, "cached_at": "2026-04-14T16:45:46Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "a5d48c3a08756a4500794d365b444b542cd957a4714ceeeab010c59d8b373d90", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:28282", "issue:33453", "issue:36296", "issue:38617", "issue:42890", "issue:43010", "issue:43232", "issue:43502", "issue:43575", "issue:43618", "issue:43992", "issue:44062", "issue:44075", "issue:44112", "issue:44857", "issue:44964", "issue:44977", "issue:45341"], "result": {"analyst_result": {"best_issue_reason": "No single best issue for deduplication; none of these is a clear representative of the rest, and the cluster does not form a coherent duplicate group.", "best_pr_reason": null, "canonical_issue_reason": "No canonical issue: the set is too heterogeneous and the titles point to distinct bugs/regressions rather than one shared defect.", "canonical_pr_reason": null, "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, 
"left": "issue:38617", "reason": "Both are loading-related, but one is an import error from configuration_utils and the other is a missing encoder weight during from_pretrained; different root causes and fixes.", "right": "issue:43992"}, {"accept": false, "left": "issue:36296", "reason": "Both mention tensor parallelism, but one is a generic training bug and the other is a specific OOM when loading a large Qwen2 model; not the same defect.", "right": "issue:43575"}, {"accept": false, "left": "issue:42890", "reason": "A flaky SamHQ integration test and a model-loading failure for Phi-4 multimodal are unrelated problems.", "right": "issue:44964"}, {"accept": false, "left": "issue:42890", "reason": "Both involve tests, but one is a seed-related flake and the other is a stale device override CI failure in GraniteSpeech; distinct issues.", "right": "issue:44112"}, {"accept": false, "left": "issue:28282", "reason": "A missing PyTorch dependency ImportError and a tokenizer-loading regression are unrelated failure modes.", "right": "issue:33453"}, {"accept": false, "left": "issue:43618", "reason": "CLIP attentions assignment and an AMP/CUDA crash in LwDetrImageLoss affect different code paths and symptoms.", "right": "issue:44857"}, {"accept": false, "left": "issue:43618", "reason": "CLIPOutput attentions regression and Qwen3.5 flash-attention generation issues are different model/runtime bugs.", "right": "issue:44977"}, {"accept": false, "left": "issue:43502", "reason": "Offline API-request leakage and a tokenizers.AddedToken keyword-argument error are unrelated.", "right": "issue:44062"}, {"accept": false, "left": "issue:43010", "reason": "A cache/layer no_grad decoration bug and ignored SGD optimizer arguments are not the same underlying change.", "right": "issue:44075"}, {"accept": false, "left": "issue:42890", "reason": "A specific SamHQ integration test flake and a generic testing_utils bug are too different to merge.", "right": "issue:45341"}, {"accept": false, 
"left": "issue:43010", "reason": "Both are generation-related, but one is about no_grad on cache/layer updates and the other is about _update_model_kwargs_for_generation after sync_gpus; separate code paths.", "right": "issue:43232"}], "summary": "These issues are broadly heterogeneous: import/load regressions, tensor-parallel/OOM problems, CI/test flakes, generation/cache bugs, and tokenizer/model-specific failures. No soft pair looks like the same underlying bug or change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:38617|issue:43992", "issue:36296|issue:43575", "issue:42890|issue:44964", "issue:42890|issue:44112", "issue:28282|issue:33453", "issue:43618|issue:44857", "issue:43618|issue:44977", "issue:43502|issue:44062", "issue:43010|issue:44075", "issue:42890|issue:45341", "issue:43010|issue:43232"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6644, "estimated_input_tokens": 3194, "item_count": 17, "node_count": 17, "serialized_chars": 12776, "soft_pair_count": 10}, "cached_at": "2026-04-14T16:46:25Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "dedc112c651413bed6d590e6ca45d29f6f5efdcd86558f1796f594b4945032ea", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:30990", "issue:31795", "issue:42548", "issue:43010", "issue:43065", "issue:43502", "issue:43723", "issue:44222", "issue:44351", "issue:44355", "issue:44368", "issue:44387", "issue:44485", "issue:44556", "issue:44749", "issue:45125", "issue:45375"], "result": {"analyst_result": {"best_issue_reason": "If one issue must be used as a representative, #44556 is the broadest concrete loading/reload regression 
report after upgrading to v5, but it still does not subsume the others.", "best_pr_reason": null, "canonical_issue_reason": "No single canonical issue cleanly represents the cluster: the items span unrelated topics (imports, loading, quantization OOM, config fields, docs, and performance).", "canonical_pr_reason": null, "confidence": 0.91, "soft_edge_verdicts": [{"accept": false, "left": "issue:43723", "reason": "Both are about loading, but one is tokenizer loading in v5 and the other is checkpoint reload failure after a v4.57-to-v5 upgrade; different failure modes and likely different fixes.", "right": "issue:44556"}, {"accept": false, "left": "issue:42548", "reason": "Both are import errors, but they concern different missing exports (`PreTrainedModel` vs `HybridCache`) and do not point to the same underlying bug.", "right": "issue:44351"}, {"accept": false, "left": "issue:44387", "reason": "One reports int4 quantization OOM from higher CUDA reserved memory; the other is a reload/load compatibility problem. 
Not the same code-path issue.", "right": "issue:44556"}, {"accept": false, "left": "issue:44222", "reason": "`save_pretrained` FP8/MoE behavior and missing `_tp_plan` for tensor parallelism are distinct model-handling problems.", "right": "issue:45125"}, {"accept": false, "left": "issue:43010", "reason": "`@torch.no_grad` on cache/layer `update()` and a dummy `nn.Conv2d` in Sam3PixelDecoder are unrelated.", "right": "issue:43065"}, {"accept": false, "left": "issue:45125", "reason": "Both involve Qwen3.5 model/config gaps, but they are different missing fields in different components (`_tp_plan` vs `deepstack_visual_indexes`).", "right": "issue:45375"}, {"accept": false, "left": "issue:44355", "reason": "Running compiled Python files errors versus slowdown after upgrading from 4.57.3 to 5.3.0 are different symptoms with no clear shared bug.", "right": "issue:44749"}, {"accept": false, "left": "issue:30990", "reason": "Sentence Transformers loading hang and forward-method documentation confusion are entirely different issues.", "right": "issue:31795"}, {"accept": false, "left": "issue:43502", "reason": "`local_files_only=True` still triggering API requests and GLM-5 RoPE implementation are unrelated.", "right": "issue:44485"}, {"accept": false, "left": "issue:43010", "reason": "A no-grad decoration change for cache updates does not match a warning about `tie_word_embeddings=False` during LoRA fine-tuning.", "right": "issue:44368"}], "summary": "This cluster appears to be a false-positive mix of unrelated issues. 
Most items mention loading/import/config/regression problems in v5, but the concrete bugs and code paths differ, so none of the soft edges look like true duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43723|issue:44556", "issue:42548|issue:44351", "issue:44387|issue:44556", "issue:44222|issue:45125", "issue:43010|issue:43065", "issue:45125|issue:45375", "issue:44355|issue:44749", "issue:30990|issue:31795", "issue:43502|issue:44485", "issue:43010|issue:44368"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6534, "estimated_input_tokens": 3139, "item_count": 17, "node_count": 17, "serialized_chars": 12554, "soft_pair_count": 10}, "cached_at": "2026-04-14T16:47:02Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "f86aa21605f98347f252a963f2a06377713f0eac12e7ba4fbbb79b6cab9e6a9e", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:36296", "issue:38175", "issue:42831", "issue:42890", "issue:43232", "issue:43299", "issue:43502", "issue:43874", "issue:44075", "issue:44315", "issue:44355", "issue:44360", "issue:44373", "issue:44556", "issue:44655", "issue:44961", "issue:45071"], "result": {"analyst_result": {"best_issue_reason": "None of the issues is a good global representative. 
Issue 43502 has the most discussion, but it is still a distinct local_files_only download bug, not a unifier for the rest.", "best_pr_reason": null, "canonical_issue_reason": "No single canonical issue fits this cluster; the items are unrelated and do not share a concrete bug path.", "canonical_pr_reason": null, "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "issue:44315", "reason": "Different problems: one is Liger Kernel application during model creation, the other is a DSA indexer activation bug. No shared code-path.", "right": "issue:44360"}, {"accept": false, "left": "issue:36296", "reason": "Tensor-parallel training versus SGD optimizer arguments are unrelated failure modes.", "right": "issue:44075"}, {"accept": false, "left": "issue:42890", "reason": "A flaky integration test missing set_seed is not the same as a wrong docstring for position_ids.", "right": "issue:44373"}, {"accept": false, "left": "issue:38175", "reason": "Unexpected zero probabilities in a specific vision model is unrelated to unwanted network requests under local_files_only.", "right": "issue:43502"}, {"accept": false, "left": "issue:43502", "reason": "local_files_only network leakage has nothing to do with the unrelated 'racoon' issue.", "right": "issue:44961"}, {"accept": false, "left": "issue:43299", "reason": "Both involve loading/version regressions, but they affect different model classes and different upgrade scenarios; not enough evidence of one concrete bug.", "right": "issue:44556"}, {"accept": false, "left": "issue:44355", "reason": "Compiled Python file execution errors and Pipeline.save_pretrained failures are separate code paths.", "right": "issue:44655"}, {"accept": false, "left": "issue:44355", "reason": "File-compilation/runtime errors are distinct from PretrainedConfig type-checking breakage.", "right": "issue:45071"}, {"accept": false, "left": "issue:42831", "reason": "FineGrainedFP8 accuracy concerns and a missing image-processor method causing 
AttributeError are unrelated.", "right": "issue:43874"}, {"accept": false, "left": "issue:42890", "reason": "A test flake due to missing set_seed is not the same as a generation/sync_gpus update bug.", "right": "issue:43232"}], "summary": "This cluster is heterogeneous: the issues cover unrelated bugs in model loading, training, tests, docs, download behavior, and version compatibility. I do not see a single underlying change or defect tying them together."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44315|issue:44360", "issue:36296|issue:44075", "issue:42890|issue:44373", "issue:38175|issue:43502", "issue:43502|issue:44961", "issue:43299|issue:44556", "issue:44355|issue:44655", "issue:44355|issue:45071", "issue:42831|issue:43874", "issue:42890|issue:43232"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7064, "estimated_input_tokens": 3404, "item_count": 18, "node_count": 18, "serialized_chars": 13615, "soft_pair_count": 12}, "cached_at": "2026-04-14T16:47:39Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "aacb6738e0154cc72b84adc072aa731c89fd5d372529e5ba71e7559dafedb8f3", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:31795", "issue:33357", "issue:33453", "issue:34689", "issue:36296", "issue:38617", "issue:41628", "issue:42548", "issue:43576", "issue:43673", "issue:43824", "issue:43827", "issue:44222", "issue:44230", "issue:44351", "issue:44492", "issue:44961", "issue:45071"], "result": {"analyst_result": {"best_issue_reason": "Issue 45071 is the best cluster representative because it describes a core v5 API regression and has strong discussion/inbound reference 
signal, while the rest are narrower or unrelated breakages.", "best_pr_reason": null, "canonical_issue_reason": "No single issue is a true duplicate of the others; if one representative is needed, issue 45071 is the broadest high-signal v5 regression report in the set.", "canonical_pr_reason": null, "confidence": 0.84, "soft_edge_verdicts": [{"accept": false, "left": "issue:44222", "reason": "Both mention fp8/moe, but one is about save_pretrained and the other about inference support; different failures and likely different fixes.", "right": "issue:44230"}, {"accept": false, "left": "issue:33453", "reason": "Both are model-loading regressions, but they affect different models/platforms and don't point to the same code-path bug.", "right": "issue:34689"}, {"accept": false, "left": "issue:33453", "reason": "MacOS CLIP bus error and tensor-parallel training are unrelated problems.", "right": "issue:36296"}, {"accept": false, "left": "issue:43827", "reason": "Documentation about pipeline removals is unrelated to an issue titled only 'racoon'.", "right": "issue:44961"}, {"accept": false, "left": "issue:43673", "reason": "A chunked_prefill cache bug and a typo in cache strategies are not the same underlying issue.", "right": "issue:44492"}, {"accept": false, "left": "issue:43576", "reason": "Broken transformers env command and a missing Qwen model export are unrelated.", "right": "issue:43824"}, {"accept": false, "left": "issue:43576", "reason": "Both are v5 regressions, but one is the env CLI and the other is PretrainedConfig type checking; different code paths and fixes.", "right": "issue:45071"}, {"accept": false, "left": "issue:38617", "reason": "ImportError for layer_type_validation and a broken env command are separate regressions.", "right": "issue:43576"}, {"accept": false, "left": "issue:33357", "reason": "Both are regressions, but one is a MacOS bus error on CLIP and the other is tokenizer loading; not the same bug.", "right": "issue:33453"}, {"accept": false, 
"left": "issue:41628", "reason": "Missing AutoImageProcessor and missing PreTrainedModel are distinct import/export failures.", "right": "issue:42548"}, {"accept": false, "left": "issue:31795", "reason": "Documentation confusion in model.forward is unrelated to the layer_type_validation import error.", "right": "issue:38617"}, {"accept": false, "left": "issue:43576", "reason": "Both are import/runtime breakages in v5, but they involve different symbols and likely different fixes.", "right": "issue:44351"}], "summary": "These issues are mostly a loose v5-regression / API-breakage cluster, but they do not describe the same underlying bug. The soft pairs are at best broad-theme matches and should be kept separate."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44222|issue:44230", "issue:33453|issue:34689", "issue:33453|issue:36296", "issue:43827|issue:44961", "issue:43673|issue:44492", "issue:43576|issue:43824", "issue:43576|issue:45071", "issue:38617|issue:43576", "issue:33357|issue:33453", "issue:41628|issue:42548", "issue:31795|issue:38617", "issue:43576|issue:44351"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7054, "estimated_input_tokens": 3399, "item_count": 18, "node_count": 18, "serialized_chars": 13596, "soft_pair_count": 12}, "cached_at": "2026-04-14T16:48:26Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "4a2d8edd16df9e57b52be7dc9a5b0701644512d21ce93e081c7fe43da10508b6", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:36296", "issue:41628", "issue:43012", "issue:43526", "issue:43576", "issue:43901", "issue:44261", "issue:44273", "issue:44355", 
"issue:44360", "issue:44485", "issue:44556", "issue:44623", "issue:44908", "issue:44938", "issue:44961", "issue:45003", "issue:45230"], "result": {"analyst_result": {"best_issue_reason": "No global best issue for the cluster: the items are too unrelated to share one durable representative, even though some are individually well-scoped bugs.", "best_pr_reason": null, "canonical_issue_reason": "No single canonical issue is justified: the issues do not describe one underlying bug or change, so choosing a representative would be arbitrary.", "canonical_pr_reason": null, "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "issue:44908", "reason": "Scheduler kwarg handling bug vs an unrelated placeholder issue; no shared code path or symptom.", "right": "issue:44961"}, {"accept": false, "left": "issue:43576", "reason": "v5 env-command regression and Python 3.14 import/load failure are different failures in different entry points.", "right": "issue:44938"}, {"accept": false, "left": "issue:44485", "reason": "RoPE implementation discussion is unrelated to the 'racoon' issue.", "right": "issue:44961"}, {"accept": false, "left": "issue:44273", "reason": "Both touch loading, but one is a lazy-loading bug and the other is unsafe sys.modules access; not the same defect.", "right": "issue:45003"}, {"accept": false, "left": "issue:36296", "reason": "Tensor-parallel training failure and TextClassificationPipeline docs mismatch are unrelated.", "right": "issue:43901"}, {"accept": false, "left": "issue:41628", "reason": "AutoImageProcessor import error vs compiled-Python-file errors are different code paths and symptoms.", "right": "issue:44355"}, {"accept": false, "left": "issue:36296", "reason": "Tensor-parallel training bug does not match a bfloat16 precision warning during compilation.", "right": "issue:43012"}, {"accept": false, "left": "issue:36296", "reason": "Training bug and BeitImageProcessorFast reduce_labels bug are unrelated components.", "right": 
"issue:43526"}, {"accept": false, "left": "issue:44261", "reason": "MLA q_a_layernorm epsilon precision issue is unrelated to the 'racoon' issue.", "right": "issue:44961"}, {"accept": false, "left": "issue:44261", "reason": "Different model internals: missing rms_norm_eps in MLA vs missing ReLU in the DSA indexer.", "right": "issue:44360"}, {"accept": false, "left": "issue:43576", "reason": "Both mention v5 breakage, but one is the env command and the other is checkpoint reload compatibility; not the same bug.", "right": "issue:44556"}, {"accept": false, "left": "issue:44623", "reason": "Specific missing-files bug in processor.save_pretrained is not supported by the generic 'Bug report' title/body.", "right": "issue:45230"}], "summary": "This set is heterogeneous: the suggested soft edges span unrelated bugs, docs mismatches, CLI issues, model-loading regressions, and architecture-specific defects. I don't see a real duplicate cluster here, so all soft matches should be rejected."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44908|issue:44961", "issue:43576|issue:44938", "issue:44485|issue:44961", "issue:44273|issue:45003", "issue:36296|issue:43901", "issue:41628|issue:44355", "issue:36296|issue:43012", "issue:36296|issue:43526", "issue:44261|issue:44961", "issue:44261|issue:44360", "issue:43576|issue:44556", "issue:44623|issue:45230"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6404, "estimated_input_tokens": 3074, "item_count": 17, "node_count": 17, "serialized_chars": 12296, "soft_pair_count": 9}, "cached_at": "2026-04-14T16:48:50Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": 
"8fa5182edc6d5f0a4185cf1bdc614aa948e23147f622d4a40a307e36ef817b4c", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:41669", "issue:42371", "issue:42890", "issue:43673", "issue:43937", "issue:44038", "issue:44246", "issue:44273", "issue:44360", "issue:44393", "issue:44492", "issue:44556", "issue:44704", "issue:44908", "issue:45071", "issue:45092", "issue:45125"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.92, "soft_edge_verdicts": [{"accept": false, "left": "issue:41669", "reason": "One is about slow `import *` scanning in models; the other is a generation cache regression during chunked prefill. Different code paths and symptoms.", "right": "issue:43673"}, {"accept": false, "left": "issue:44393", "reason": "Qwen3-VL 2D bounding-box output errors are a model-specific generation/formatting bug, while the other is a broad `PretrainedConfig` type-checking regression.", "right": "issue:45071"}, {"accept": false, "left": "issue:44038", "reason": "Qwen3-VL-Moe bug versus missing `_tp_plan` for tensor parallelism. 
Same family name, but different failure modes and likely different fixes.", "right": "issue:45125"}, {"accept": false, "left": "issue:43937", "reason": "GenerationConfig validation for GLM-5 is unrelated to the DSA indexer/ReLU discussion; no shared concrete bug.", "right": "issue:44360"}, {"accept": false, "left": "issue:44556", "reason": "Checkpoint reload compatibility after upgrading v5.x is not the same issue as old InternVL2 remote-code checkpoints failing meta initialization.", "right": "issue:45092"}, {"accept": false, "left": "issue:42371", "reason": "TF32 API guidance and a cache-strategy typo are unrelated; one is configuration/API usage, the other is a wording/documentation issue.", "right": "issue:44492"}, {"accept": false, "left": "issue:42890", "reason": "Missing `set_seed()` in SamHQ tests is a flaky-test problem, while this is an inverse_sqrt scheduler kwarg propagation bug.", "right": "issue:44908"}, {"accept": false, "left": "issue:44246", "reason": "Both mention loading/import performance, but one is about slow `import transformers` and the other is lazy loading malfunction; not the same underlying defect.", "right": "issue:44273"}, {"accept": false, "left": "issue:44704", "reason": "AutoProcessor kwarg forwarding to `cached_file` is unrelated to scheduler kwargs not being passed through.", "right": "issue:44908"}], "summary": "The candidate pairs are mostly title-similar but describe different bugs, regressions, or model-specific issues. 
None look like the same underlying change or fix, so I would not collapse any of the soft edges."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:41669|issue:43673", "issue:44393|issue:45071", "issue:44038|issue:45125", "issue:43937|issue:44360", "issue:44556|issue:45092", "issue:42371|issue:44492", "issue:42890|issue:44908", "issue:44246|issue:44273", "issue:44704|issue:44908"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7312, "estimated_input_tokens": 3528, "item_count": 18, "node_count": 18, "serialized_chars": 14110, "soft_pair_count": 13}, "cached_at": "2026-04-14T16:49:17Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "2647bf60b3ad7fba6cf316c33f149650f8905be45a573881448a4a1dc30991f8", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:41669", "issue:43352", "issue:43408", "issue:43576", "issue:43827", "issue:43976", "issue:44295", "issue:44297", "issue:44315", "issue:44351", "issue:44393", "issue:44492", "issue:44829", "issue:44908", "issue:44945", "issue:45071", "issue:45230", "issue:45341"], "result": {"analyst_result": {"best_issue_reason": "None of the issues is suitable as a global best representative; the cluster is heterogeneous rather than a duplicate set.", "best_pr_reason": "No pull requests are present to evaluate as a best PR.", "canonical_issue_reason": "No single issue is a good canonical representative because the items span unrelated bugs and regressions across different code paths.", "canonical_pr_reason": "No pull requests are present in this cluster.", "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "issue:43408", "reason": 
"Both involve model instantiation warnings, but one is about sam3_video vs sam3_tracker type mismatch and the other is a Liger Kernel not being applied with model_init; different bugs.", "right": "issue:44315"}, {"accept": false, "left": "issue:43576", "reason": "transformers env broken in v5 is unrelated to inverse_sqrt scheduler ignoring lr_scheduler_kwargs; different command and code path.", "right": "issue:44908"}, {"accept": false, "left": "issue:43976", "reason": "Python version compatibility regression does not match a Qwen3-VL 2D bounding-box hallucination/output bug.", "right": "issue:44393"}, {"accept": false, "left": "issue:43352", "reason": "Flash Attention 2 support error for Nemotron is unrelated to Qwen3-VL bounding-box output issues.", "right": "issue:44393"}, {"accept": false, "left": "issue:44908", "reason": "A scheduler kwargs propagation bug is not the same as PretrainedConfig type-checking breakage.", "right": "issue:45071"}, {"accept": false, "left": "issue:41669", "reason": "import * performance regression in models is unrelated to a missing HybridCache import/export issue.", "right": "issue:44351"}, {"accept": false, "left": "issue:44315", "reason": "Liger Kernel not applied via model_init and degenerate training with flash_attention_3 are different failures in different paths.", "right": "issue:44829"}, {"accept": false, "left": "issue:44297", "reason": "Tokenizer save_pretrained metadata mismatch is not the same as an unspecified 'Bug report' with no clear concrete target.", "right": "issue:45230"}, {"accept": false, "left": "issue:41669", "reason": "import * slowdown in models does not match a bug in testing_utils.py.", "right": "issue:45341"}, {"accept": false, "left": "issue:41669", "reason": "A model import-performance issue is unrelated to a typo in cache strategies.", "right": "issue:44492"}, {"accept": false, "left": "issue:44492", "reason": "A typo fix in cache strategies is not the same as incorrect LLM output under pipeline 
parallelism.", "right": "issue:44945"}, {"accept": false, "left": "issue:44295", "reason": "Position_ids buffer access error is unrelated to an unspecific bug report with no evidence of the same root cause.", "right": "issue:45230"}, {"accept": false, "left": "issue:43827", "reason": "Docs still referencing pipeline() after v5 removals is unrelated to inverse_sqrt scheduler kwargs handling.", "right": "issue:44908"}], "summary": "These are not duplicate issues; the soft pairs mostly share only vague subsystem overlap or generic bug language, but each points to a different concrete failure mode. No canonical issue emerges from this set."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43408|issue:44315", "issue:43576|issue:44908", "issue:43976|issue:44393", "issue:43352|issue:44393", "issue:44908|issue:45071", "issue:41669|issue:44351", "issue:44315|issue:44829", "issue:44297|issue:45230", "issue:41669|issue:45341", "issue:41669|issue:44492", "issue:44492|issue:44945", "issue:44295|issue:45230", "issue:43827|issue:44908"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7080, "estimated_input_tokens": 3412, "item_count": 18, "node_count": 18, "serialized_chars": 13648, "soft_pair_count": 12}, "cached_at": "2026-04-14T16:49:59Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "d91fb4fd702e36213aa904f8c819b333b491c587e7dbfc947ace22173e4516ab", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:31795", "issue:33453", "issue:41669", "issue:43519", "issue:43576", "issue:43704", "issue:43723", "issue:43937", "issue:44077", "issue:44273", "issue:44393", "issue:44485", "issue:44655", 
"issue:44861", "issue:44908", "issue:45125", "issue:45230", "issue:45310"], "result": {"analyst_result": {"best_issue_reason": "Issue 44861 is the clearest concrete bug report in the group: a specific crash condition with a narrow code path and an obvious fix target.", "best_pr_reason": null, "canonical_issue_reason": "No single canonical issue emerges from this set: the items span unrelated failures across tokenizer loading, lazy loading, Qwen models, scheduler behavior, RoPE, pipeline saving, and CLI tooling.", "canonical_pr_reason": null, "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "issue:44077", "reason": "Different concrete bugs: optional post_init gating vs an AttributeError in tied-weight key handling.", "right": "issue:44861"}, {"accept": false, "left": "issue:41669", "reason": "One is import * performance overhead; the other is a VRAM leak in multi-threaded Qwen3ForCausalLM use.", "right": "issue:43704"}, {"accept": false, "left": "issue:31795", "reason": "Documentation confusion about forward args is not the same as a tokenizer-loading regression.", "right": "issue:33453"}, {"accept": false, "left": "issue:43723", "reason": "Tokenizer loading failure and Qwen3-VL 2D bbox output errors are unrelated code paths.", "right": "issue:44393"}, {"accept": false, "left": "issue:44655", "reason": "Saving Pipeline objects and crashing in _get_tied_weight_keys are separate issues.", "right": "issue:44861"}, {"accept": false, "left": "issue:43576", "reason": "Broken transformers env CLI and lazy loading behavior are different features and failures.", "right": "issue:44273"}, {"accept": false, "left": "issue:43937", "reason": "GLM-5 GenerationConfig validation and Qwen3-VL bounding-box hallucinations are distinct bugs.", "right": "issue:44393"}, {"accept": false, "left": "issue:45125", "reason": "Both involve Qwen3.5 Moe loading/tensor-parallel concerns, but the reported failures are different enough that they are not clearly the same bug.", 
"right": "issue:45310"}, {"accept": false, "left": "issue:44273", "reason": "Lazy loading failure and inverse_sqrt scheduler kwargs handling do not share the same underlying code path.", "right": "issue:44908"}, {"accept": false, "left": "issue:45230", "reason": "A generic bug report is not enough to match a specific Qwen3.5 Moe from_pretrained error.", "right": "issue:45310"}, {"accept": false, "left": "issue:43519", "reason": "Incorrect timestamp calculation in Qwen3VL Processor and GLM-5 RoPE implementation are unrelated model-specific bugs.", "right": "issue:44485"}, {"accept": false, "left": "issue:43519", "reason": "Timestamp calculation in a processor and scheduler kwargs handling are unrelated subsystems.", "right": "issue:44908"}], "summary": "These soft pairs are mostly broad topical similarities, but they do not describe the same underlying bug or change. I would not merge any of them as duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44077|issue:44861", "issue:41669|issue:43704", "issue:31795|issue:33453", "issue:43723|issue:44393", "issue:44655|issue:44861", "issue:43576|issue:44273", "issue:43937|issue:44393", "issue:45125|issue:45310", "issue:44273|issue:44908", "issue:45230|issue:45310", "issue:43519|issue:44485", "issue:43519|issue:44908"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7134, "estimated_input_tokens": 3439, "item_count": 18, "node_count": 18, "serialized_chars": 13755, "soft_pair_count": 12}, "cached_at": "2026-04-14T16:50:45Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "28ada805fbff6fd38b7627ea81d8be71726b456a15217e32ed98fe881198376e", 
"prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:31795", "issue:34689", "issue:37428", "issue:42371", "issue:42890", "issue:42913", "issue:43066", "issue:43502", "issue:43519", "issue:43704", "issue:44273", "issue:44315", "issue:44393", "issue:44485", "issue:44623", "issue:44908", "issue:44998", "issue:45071"], "result": {"analyst_result": {"best_issue_reason": "45071 is the strongest global representative: it is a clear core-regression report affecting `PretrainedConfig` type checking across v5.4.0, unlike the narrower or unrelated items.", "best_pr_reason": null, "canonical_issue_reason": "Issue 45071 is the broadest, highest-impact regression in the set and the most representative anchor among otherwise unrelated reports.", "canonical_pr_reason": null, "confidence": 0.93, "soft_edge_verdicts": [{"accept": false, "left": "issue:31795", "reason": "Documentation confusion about `input_ids`/`past_key_values` is unrelated to a Llama 3.2 model-loading breakage.", "right": "issue:34689"}, {"accept": false, "left": "issue:44315", "reason": "`model_init`/Liger application and Qwen3-VL 2D bbox output are different code paths and different bugs.", "right": "issue:44393"}, {"accept": false, "left": "issue:31795", "reason": "Docs wording issue does not match an import error in flash-attention utilities.", "right": "issue:37428"}, {"accept": false, "left": "issue:44273", "reason": "Lazy-loading behavior and `PretrainedConfig` type checking are separate concerns with no shared failure mode.", "right": "issue:45071"}, {"accept": false, "left": "issue:43704", "reason": "A VRAM leak in Qwen3ForCausalLM threading is unrelated to GLM-5 RoPE implementation discussion.", "right": "issue:44485"}, {"accept": false, "left": "issue:42371", "reason": "TF32 configuration guidance is unrelated to a Qwen3ForCausalLM memory leak.", "right": "issue:43704"}, {"accept": false, "left": "issue:43502", "reason": "`local_files_only=True` network requests and Qwen3VL timestamp 
calculation are distinct bugs.", "right": "issue:43519"}, {"accept": false, "left": "issue:43519", "reason": "Timestamp math in Qwen3VL and missing `processor.save_pretrained` files are different processor issues.", "right": "issue:44623"}, {"accept": false, "left": "issue:43519", "reason": "Processor timestamp calculations and multi-thread VRAM leaks do not point to the same underlying defect.", "right": "issue:43704"}, {"accept": false, "left": "issue:42913", "reason": "Both mention tokenizers/v5, but one is a general behavior change while the other is a decoder-type bug; not the same concrete issue.", "right": "issue:43066"}, {"accept": false, "left": "issue:42890", "reason": "A flaky integration test from missing `set_seed()` is unrelated to a VRAM leak.", "right": "issue:43704"}, {"accept": false, "left": "issue:44908", "reason": "Scheduler kwargs handling has no relation to the unrelated 'Unemployment' issue.", "right": "issue:44998"}], "summary": "These are mostly unrelated issues with only superficial lexical overlap (tokenizer/v5, processor, model-loading terms). 
None of the soft pairs look like the same underlying bug or change, so they should not be merged as duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:31795|issue:34689", "issue:44315|issue:44393", "issue:31795|issue:37428", "issue:44273|issue:45071", "issue:43704|issue:44485", "issue:42371|issue:43704", "issue:43502|issue:43519", "issue:43519|issue:44623", "issue:43519|issue:43704", "issue:42913|issue:43066", "issue:42890|issue:43704", "issue:44908|issue:44998"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7390, "estimated_input_tokens": 3567, "item_count": 18, "node_count": 18, "serialized_chars": 14265, "soft_pair_count": 14}, "cached_at": "2026-04-14T16:51:31Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "ca923fe5917348495f86c800e08338e915275ab5afb736745c88e6a33c0eda04", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:31515", "issue:31795", "issue:33453", "issue:36296", "issue:36683", "issue:38617", "issue:43525", "issue:43572", "issue:43618", "issue:44230", "issue:44492", "issue:44556", "issue:44623", "issue:44861", "issue:44998", "issue:45290", "issue:45310", "issue:45341"], "result": {"analyst_result": {"best_issue_reason": "issue:44556 is the clearest concrete regression in the set: a specific checkpoint reload failure after version upgrade. 
It is the best representative bug report, but not a duplicate anchor for the other items.", "best_pr_reason": null, "canonical_issue_reason": "No single canonical issue stands out: the cluster is too heterogeneous, and the apparent overlap is mostly lexical (loading/from_pretrained/config) rather than the same bug.", "canonical_pr_reason": null, "confidence": 0.9, "soft_edge_verdicts": [{"accept": false, "left": "issue:44861", "reason": "Different failures and code paths: tied-weight key handling vs apply_chat_template crash on tool-call assistant messages.", "right": "issue:45290"}, {"accept": false, "left": "issue:44998", "reason": "No substantive overlap; one is a nontechnical title, the other is a testing_utils bug report.", "right": "issue:45341"}, {"accept": false, "left": "issue:44492", "reason": "A typo in cache strategies is unrelated to the unrelated 44998 report.", "right": "issue:44998"}, {"accept": false, "left": "issue:43618", "reason": "CLIPOutput attentions regression and 44998 are unrelated.", "right": "issue:44998"}, {"accept": false, "left": "issue:44556", "reason": "Checkpoint reload regression is unrelated to the 44998 issue.", "right": "issue:44998"}, {"accept": false, "left": "issue:43525", "reason": "Missing pad_token_id in Gemma/Llama config is a different config bug than _get_tied_weight_keys crashing.", "right": "issue:44861"}, {"accept": false, "left": "issue:43572", "reason": "StableLmConfig pad_token_idx issue does not match the tied-weights AttributeError.", "right": "issue:44861"}, {"accept": false, "left": "issue:31515", "reason": "Slow from_pretrained loading and a missing import in configuration_utils are different bugs.", "right": "issue:38617"}, {"accept": false, "left": "issue:44230", "reason": "FP8 inference support for Qwen models is unrelated to processor.save_pretrained missing files.", "right": "issue:44623"}, {"accept": false, "left": "issue:31515", "reason": "Checkpoint loading slowness and tensor-parallel training bugs 
are distinct problems.", "right": "issue:36296"}, {"accept": false, "left": "issue:31515", "reason": "Performance of from_pretrained is unrelated to documentation confusion in model.forward.", "right": "issue:31795"}, {"accept": false, "left": "issue:31515", "reason": "Both mention loading, but one is slowness and the other is a tokenizer loading regression; different failure modes.", "right": "issue:33453"}, {"accept": false, "left": "issue:44230", "reason": "Same model family, but one is FP8 support/inference and the other is a from_pretrained error; not the same concrete fix.", "right": "issue:45310"}, {"accept": false, "left": "issue:31515", "reason": "from_pretrained loading slowness is unrelated to Gemma3Config missing vocab_size.", "right": "issue:36683"}], "summary": "The items are mostly heterogeneous bug reports that only share broad loading/configuration vocabulary. None of the soft pairs look like the same underlying defect, so all soft edges should be rejected."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44861|issue:45290", "issue:44998|issue:45341", "issue:44492|issue:44998", "issue:43618|issue:44998", "issue:44556|issue:44998", "issue:43525|issue:44861", "issue:43572|issue:44861", "issue:31515|issue:38617", "issue:44230|issue:44623", "issue:31515|issue:36296", "issue:31515|issue:31795", "issue:31515|issue:33453", "issue:44230|issue:45310", "issue:31515|issue:36683"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 988, "estimated_input_tokens": 366, "item_count": 2, "node_count": 2, "serialized_chars": 1463, "soft_pair_count": 1}, "cached_at": "2026-04-14T16:51:52Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": 
"gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "4b62d224e18775cdeabae3d71bbe328192a1e557e8d25dfe5706ac2301675de1", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:43298", "issue:44230"], "result": {"analyst_result": {"best_issue_reason": "Neither issue is a good canonical representative for this cluster because they describe different problems in different model areas.", "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.98, "soft_edge_verdicts": [{"accept": false, "left": "issue:43298", "reason": "Different underlying requests: audio continuous batching support vs FP8 inference support for Qwen models. No shared code-path problem is apparent.", "right": "issue:44230"}], "summary": "These issues are unrelated: one is about continuous batching for audio models, the other is about FP8 inference support for specific Qwen VL/MoE models. No duplicate relationship is evident."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43298|issue:44230"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2484, "estimated_input_tokens": 1114, "item_count": 4, "node_count": 4, "serialized_chars": 4453, "soft_pair_count": 6}, "cached_at": "2026-04-14T16:52:15Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "12349bc8671201ffb425fb65366de18a8b83468d971da444155d4d7be2fbe584", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:27088", "issue:43834", "issue:44365", "issue:44370"], "result": {"analyst_result": {"best_issue_reason": "issue 27088 is the best representative because it is specific and non-placeholder, while the 
others are too generic to safely collapse into one duplicate set.", "best_pr_reason": null, "canonical_issue_reason": "issue 27088 is the only concrete, actionable topic ('Translating docs to Turkish'); the others are generic template-like i18n issues and may not be the same underlying request.", "canonical_pr_reason": null, "confidence": 0.86, "soft_edge_verdicts": [{"accept": false, "left": "issue:27088", "reason": "Both are i18n translation requests, but one is Turkish-specific and the other is a generic placeholder; not enough to prove the same underlying request.", "right": "issue:44370"}, {"accept": false, "left": "issue:27088", "reason": "Same broad translation theme, but the second issue is a generic template entry rather than the same concrete language request.", "right": "issue:44365"}, {"accept": false, "left": "issue:27088", "reason": "High textual similarity, but the placeholder title/body makes it unsafe to treat as the same exact issue as the Turkish request.", "right": "issue:43834"}, {"accept": false, "left": "issue:43834", "reason": "They look like template-generated i18n issues, but there is no evidence they target the same language or request.", "right": "issue:44370"}, {"accept": false, "left": "issue:43834", "reason": "Nearly identical template wording is not enough; these could be distinct language translation requests.", "right": "issue:44365"}, {"accept": false, "left": "issue:44365", "reason": "The similarity is structural/template-based only, with no concrete shared language or unique bug/change to unify them.", "right": "issue:44370"}], "summary": "These are highly similar i18n translation-request issues, but the evidence suggests template-level similarity rather than true duplicates. 
Issue 27088 is the only concrete language-specific request; the others are generic placeholder-style issues that may refer to different languages, so they should not be merged."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:27088|issue:44370", "issue:27088|issue:44365", "issue:27088|issue:43834", "issue:43834|issue:44370", "issue:43834|issue:44365", "issue:44365|issue:44370"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7318, "estimated_input_tokens": 3531, "item_count": 13, "node_count": 13, "serialized_chars": 14121, "soft_pair_count": 24}, "cached_at": "2026-04-14T16:53:07Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "06363ccd0c1296b6ef81961d6c9503a6361f3a97fc04533589568e46354947e0", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:36817", "issue:37015", "issue:41211", "issue:42503", "issue:42738", "issue:43671", "issue:43837", "issue:43909", "issue:44121", "issue:44205", "issue:45027", "issue:45177", "issue:45322"], "result": {"analyst_result": {"best_issue_reason": "Issue 43837 is the best representative issue for this cluster because it is a broad support request and sits near other audio/model-support requests, but it should not be treated as a canonical duplicate target.", "best_pr_reason": null, "canonical_issue_reason": "Issue 43837 is a generic support request in the same broad model-addition space, but there is no true duplicate cluster here; it is only the least-specific representative among unrelated requests.", "canonical_pr_reason": null, "confidence": 0.9, "soft_edge_verdicts": [{"accept": false, "left": "issue:43909", "reason": "Different model 
requests: LFM2.5 Audio vs SAM3-LiteText. Same broad support theme, but not the same change.", "right": "issue:44205"}, {"accept": false, "left": "issue:43671", "reason": "Both are Qwen3 audio-related, but one is TTS and the other is ASR; separate model integrations.", "right": "issue:43837"}, {"accept": false, "left": "issue:44121", "reason": "Unrelated model families: sparse transformer/circuitgpt vs EUPE perception encoder.", "right": "issue:45322"}, {"accept": false, "left": "issue:43837", "reason": "Qwen3-ASR and LFM2.5 Audio are different models with different code paths.", "right": "issue:43909"}, {"accept": false, "left": "issue:43837", "reason": "ASR support request vs Meta EUPE encoder request; not the same underlying model addition.", "right": "issue:45322"}, {"accept": false, "left": "issue:37015", "reason": "NeoBERT and LFM2.5 Audio are unrelated model families.", "right": "issue:43909"}, {"accept": false, "left": "issue:43671", "reason": "Qwen3-TTS support and DeepSeek-OCR2 are different modalities and implementations.", "right": "issue:45177"}, {"accept": false, "left": "issue:37015", "reason": "NeoBERT vs Qwen3-ASR: different model architectures and targets.", "right": "issue:43837"}, {"accept": false, "left": "issue:43671", "reason": "Qwen3-TTS and Voxtral-4B-TTS-2603 are both TTS-themed, but they are distinct model additions.", "right": "issue:45027"}, {"accept": false, "left": "issue:37015", "reason": "NeoBERT and circuit-sparsity transformer are unrelated model requests.", "right": "issue:44121"}, {"accept": false, "left": "issue:43837", "reason": "ASR model support vs sparse transformer model request; not the same feature.", "right": "issue:44121"}, {"accept": false, "left": "issue:43837", "reason": "ASR support and OCR model support are different code paths.", "right": "issue:45177"}, {"accept": false, "left": "issue:43671", "reason": "TTS vs audio language model request; related domain, but not the same concrete model.", "right": 
"issue:43909"}, {"accept": false, "left": "issue:43671", "reason": "Qwen3-TTS and OpenAI weight-sparse transformer are unrelated.", "right": "issue:44121"}, {"accept": false, "left": "issue:41211", "reason": "DEIMv2 and Qwen3-TTS are different model families with no shared implementation target.", "right": "issue:43671"}, {"accept": false, "left": "issue:36817", "reason": "EuroBert config addition and DEIMv2 support are distinct requests.", "right": "issue:41211"}, {"accept": false, "left": "issue:37015", "reason": "NeoBERT and ModernVBERT are separate BERT-like model additions, not the same model.", "right": "issue:42503"}, {"accept": false, "left": "issue:36817", "reason": "EuroBert config support vs Qwen3-TTS support are unrelated additions.", "right": "issue:43671"}, {"accept": false, "left": "issue:37015", "reason": "NeoBERT and Voxtral TTS are different model families and modalities.", "right": "issue:45027"}, {"accept": false, "left": "issue:42738", "reason": "BERT-like models with RoPE and Voxtral TTS are not the same underlying change.", "right": "issue:45027"}, {"accept": false, "left": "issue:43837", "reason": "Qwen3-ASR and Voxtral TTS are both audio-related but distinct model integrations.", "right": "issue:45027"}, {"accept": false, "left": "issue:42503", "reason": "ModernVBERT and Qwen3-ASR are unrelated model requests.", "right": "issue:43837"}, {"accept": false, "left": "issue:42503", "reason": "ModernVBERT models and generic BERT-like models with RoPE overlap conceptually, but they are not the same concrete request.", "right": "issue:42738"}, {"accept": false, "left": "issue:42503", "reason": "ModernVBERT and Qwen3-TTS are different model support requests.", "right": "issue:43671"}], "summary": "These are model-request issues across different model families and modalities (vision, audio, OCR, sparse transformer, etc.), not duplicate reports of the same underlying change. 
No soft pair looks mergeable as a single duplicate; they only share a broad \u201cadd model support\u201d theme."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43909|issue:44205", "issue:43671|issue:43837", "issue:44121|issue:45322", "issue:43837|issue:43909", "issue:43837|issue:45322", "issue:37015|issue:43909", "issue:43671|issue:45177", "issue:37015|issue:43837", "issue:43671|issue:45027", "issue:37015|issue:44121", "issue:43837|issue:44121", "issue:43837|issue:45177", "issue:43671|issue:43909", "issue:43671|issue:44121", "issue:41211|issue:43671", "issue:36817|issue:41211", "issue:37015|issue:42503", "issue:36817|issue:43671", "issue:37015|issue:45027", "issue:42738|issue:45027", "issue:43837|issue:45027", "issue:42503|issue:43837", "issue:42503|issue:42738", "issue:42503|issue:43671"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 974, "estimated_input_tokens": 359, "item_count": 2, "node_count": 2, "serialized_chars": 1434, "soft_pair_count": 1}, "cached_at": "2026-04-14T16:53:22Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "55abfc108469d9d7518cc17abd778c086ca0bb0bb0c4ae9bb96fb0f11ea812ff", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:42503", "issue:45027"], "result": {"analyst_result": {"best_issue_reason": "#45027 is the better standalone issue to keep as the representative item because it is open and has more discussion activity, but it should not subsume #42503 since the requested changes are different.", "best_pr_reason": null, "canonical_issue_reason": "No canonical duplicate issue exists here because the requests target different model families and 
different work items. If one issue must be chosen as the cluster anchor, #45027 is the more active open request, but it is not a duplicate of #42503.", "canonical_pr_reason": null, "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "issue:42503", "reason": "Different models and different requests: ModernVBERT model additions vs Voxtral-4B-TTS-2603 support. They do not describe the same underlying bug or change.", "right": "issue:45027"}], "summary": "These are two unrelated feature/support requests for different models, not duplicates: one asks to add ModernVBERT models, the other asks for Voxtral-4B-TTS-2603 support in Transformers."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:42503|issue:45027"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 3404, "estimated_input_tokens": 1574, "item_count": 6, "node_count": 6, "serialized_chars": 6293, "soft_pair_count": 8}, "cached_at": "2026-04-14T16:53:52Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "d0cfaf367742fe61398bf9631a5d004a2a13657bcb02ec78049b55bb65263903", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:37934", "issue:42936", "issue:43240", "issue:43668", "issue:43813", "issue:44031"], "result": {"analyst_result": {"best_issue_reason": "#43240 is the best representative to keep because it describes a clear, actionable code-path bug with broader impact than the other titles.", "best_pr_reason": null, "canonical_issue_reason": "issue #43240 is the most substantive and broadly applicable bug report in the set; the others are narrower model-specific questions or simple typos/config issues.", 
"canonical_pr_reason": null, "confidence": 0.93, "soft_edge_verdicts": [{"accept": false, "left": "issue:37934", "reason": "Different topics: a question about Llama4TextL2Norm behavior vs a ModernBERT config type-hint typo.", "right": "issue:43668"}, {"accept": false, "left": "issue:42936", "reason": "Both involve loss handling, but one is a specific Mask2Former ignore_value issue and the other is generic kwargs forwarding; not the same underlying bug.", "right": "issue:43240"}, {"accept": false, "left": "issue:42936", "reason": "Mask2Former loss plumbing and tokenizer regex warnings are unrelated subsystems.", "right": "issue:44031"}, {"accept": false, "left": "issue:42936", "reason": "Ignore_value usage in Mask2Former does not match a quantization-operation typo.", "right": "issue:43813"}, {"accept": false, "left": "issue:42936", "reason": "A model loss-value bug and a config type-hint error are not the same issue.", "right": "issue:43668"}, {"accept": false, "left": "issue:43240", "reason": "Loss kwargs forwarding vs a ModernBERTConfig annotation problem are unrelated.", "right": "issue:43668"}, {"accept": false, "left": "issue:43668", "reason": "Config type-hint correctness and tokenizer regex warnings are different bugs.", "right": "issue:44031"}, {"accept": false, "left": "issue:43668", "reason": "A type-hint typo and a quantization attribute typo are not the same underlying change or bug.", "right": "issue:43813"}], "summary": "These items are mostly unrelated issue reports with only superficial text similarity; they span model-specific behavior questions, loss-plumbing bugs, config type hints, tokenizer warnings, and a quantization typo."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:37934|issue:43668", "issue:42936|issue:43240", "issue:42936|issue:44031", "issue:42936|issue:43813", "issue:42936|issue:43668", "issue:43240|issue:43668", "issue:43668|issue:44031", 
"issue:43668|issue:43813"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 996, "estimated_input_tokens": 370, "item_count": 2, "node_count": 2, "serialized_chars": 1480, "soft_pair_count": 1}, "cached_at": "2026-04-14T16:54:01Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "36d252391682b9400e00aaa7e07ceea76fe07fea7180bea66a16b952807e48db", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:41084", "issue:43037"], "result": {"analyst_result": {"best_issue_reason": "Neither issue is a good canonical duplicate target because they cover different topics; if forced, the DeepSeek support request is the clearer standalone feature issue.", "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "issue:41084", "reason": "Titles and context are unrelated: decoding mechanics versus model support. No shared targets, files, or concrete code-path overlap suggesting the same underlying bug or change.", "right": "issue:43037"}], "summary": "These two issues do not appear to describe the same bug or change. 
One is about set/block decoding behavior, while the other is a feature request for DeepSeek v3.2 support."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:41084|issue:43037"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 1048, "estimated_input_tokens": 396, "item_count": 2, "node_count": 2, "serialized_chars": 1584, "soft_pair_count": 1}, "cached_at": "2026-04-14T16:54:14Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "4c85397e522b90c291c97ea8fe0690e3fdbf61dddabffc80427c6cdb9f4026fc", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:43598", "issue:43600"], "result": {"analyst_result": {"best_issue_reason": "Issue 43598 is the better representative issue because it has a narrower, clearer fix target than the FLOPS-estimation question.", "best_pr_reason": null, "canonical_issue_reason": "Issue 43598 is the more concrete, code-path-specific bug report around `remove_unused_column` and Trainer input customizability.", "canonical_pr_reason": null, "confidence": 0.86, "soft_edge_verdicts": [{"accept": false, "left": "issue:43598", "reason": "They share the broad theme of custom trainers, but they describe different problems: one concerns unused-column removal, the other concerns FLOPS counting. 
They would not plausibly be merged into one fix.", "right": "issue:43600"}], "summary": "These look like two related-but-distinct Trainer customization issues, not duplicates: one is about `remove_unused_column` behavior, the other about FLOPS estimation with custom trainers."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43598|issue:43600"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 1062, "estimated_input_tokens": 403, "item_count": 2, "node_count": 2, "serialized_chars": 1612, "soft_pair_count": 1}, "cached_at": "2026-04-14T16:54:26Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "5a78fc89c7b8f48c31df00548742e2133e00b822efd86cfa230a38826871a1a6", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:43684", "issue:44405"], "result": {"analyst_result": {"best_issue_reason": "#43684 is the best cluster representative because it covers the more general auto-model registration gap and is closer to the underlying framework-level change.", "best_pr_reason": null, "canonical_issue_reason": "#43684 is the broader, more representative request: it asks for Qwen3-Omni registration in AutoModel and AutoModelForConditionalGeneration, whereas #44405 is a narrower AutoModelForSequenceClassification request for Qwen3.5.", "canonical_pr_reason": null, "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "issue:43684", "reason": "Different underlying requests: one is Qwen3-Omni registration for AutoModel/AutoModelForConditionalGeneration, the other is Qwen3.5 sequence-classification support. 
They involve different model families and different auto-class paths, so they should not be merged as duplicates.", "right": "issue:44405"}], "summary": "These are related only at a very broad theme level (Qwen auto-class support), but they target different model families and different auto-model entry points, so they are not duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43684|issue:44405"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2388, "estimated_input_tokens": 1066, "item_count": 4, "node_count": 4, "serialized_chars": 4264, "soft_pair_count": 6}, "cached_at": "2026-04-14T16:55:10Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "9b6601050ed0cf3a8b2580b5a7a479f566d2ddfacef8f631ab51dd844838d5fe", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:43835", "issue:43845", "issue:43846", "issue:43859"], "result": {"analyst_result": {"best_issue_reason": "issue:43845 is the strongest global issue candidate because it matches the repeated title pattern and is earliest in the apparent duplicate group.", "best_pr_reason": null, "canonical_issue_reason": "issue:43845 is the earliest of the three near-identical \"huggingface \" issues and is the best representative of that duplicate set.", "canonical_pr_reason": null, "confidence": 0.84, "soft_edge_verdicts": [{"accept": false, "left": "issue:43835", "reason": "Different titles and no shared concrete bug details; insufficient evidence they are the same issue.", "right": "issue:43845"}, {"accept": false, "left": "issue:43835", "reason": "Different titles and no concrete overlap beyond generic similarity.", 
"right": "issue:43846"}, {"accept": false, "left": "issue:43835", "reason": "Different titles and no evidence of the same underlying report.", "right": "issue:43859"}, {"accept": true, "left": "issue:43845", "reason": "Near-identical title and very similar minimal content; these look like duplicate instances of the same placeholder issue.", "right": "issue:43846"}, {"accept": true, "left": "issue:43845", "reason": "Same title text and highly similar sparse reports, so they plausibly describe the same issue.", "right": "issue:43859"}, {"accept": true, "left": "issue:43846", "reason": "Same title text and highly similar sparse reports; likely duplicates of the same issue.", "right": "issue:43859"}], "summary": "The three \"huggingface \" issues appear to be the same duplicate placeholder report; the \"Fett\" issue is unrelated. I\u2019d keep the earliest huggingface issue as canonical."}, "error_kind": null, "error_message": null, "evaluator_result": {"accept": true, "feedback": "The summary is grounded in the packet: issue 43835 has a distinct title, while 43845/43846/43859 share the same title and close timestamps, so treating 43835 as unrelated and choosing 43845 as the canonical representative is conservative. 
The soft-edge accepts are also reasonably cautious given the identical titles and high soft-similarity scores."}, "evaluator_used": true, "retried": false}, "soft_pairs": ["issue:43835|issue:43845", "issue:43835|issue:43846", "issue:43835|issue:43859", "issue:43845|issue:43846", "issue:43845|issue:43859", "issue:43846|issue:43859"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 3154, "estimated_input_tokens": 1449, "item_count": 5, "node_count": 5, "serialized_chars": 5794, "soft_pair_count": 9}, "cached_at": "2026-04-14T16:55:25Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "54d98404267c588a904dd5216cc5d6e11ad2421c7b620102060eb4de0d7a1bcb", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:44371", "issue:44418", "issue:44572", "issue:44573", "issue:44937"], "result": {"analyst_result": {"best_issue_reason": "issue:44418 is the least obviously spammy item, so it is the best available issue candidate, though still not a strong canonical target.", "best_pr_reason": null, "canonical_issue_reason": "issue:44418 is the only non-spam-looking entry, but it is still very vague and lacks a concrete bug/change description.", "canonical_pr_reason": null, "confidence": 0.93, "soft_edge_verdicts": [{"accept": false, "left": "issue:44371", "reason": "Both are titled ''; identical spam labels are not enough to establish the same underlying issue.", "right": "issue:44572"}, {"accept": false, "left": "issue:44371", "reason": "Both are titled ''; there is no concrete bug report or shared technical detail linking them.", "right": "issue:44573"}, {"accept": false, "left": "issue:44371", "reason": "One is spam and the other is a generic documentation suggestion; no evidence they 
describe the same underlying problem.", "right": "issue:44418"}, {"accept": false, "left": "issue:44371", "reason": "'' and 'Check out \"Google LLC\"' are clearly unrelated on their face.", "right": "issue:44937"}, {"accept": false, "left": "issue:44418", "reason": "A vague documentation enhancement request does not match a spam issue; no shared problem statement.", "right": "issue:44572"}, {"accept": false, "left": "issue:44418", "reason": "A vague documentation enhancement request and a spam post are not the same issue.", "right": "issue:44573"}, {"accept": false, "left": "issue:44572", "reason": "Both are spam-like titles, but there is no evidence they refer to the same underlying bug or request.", "right": "issue:44573"}, {"accept": false, "left": "issue:44572", "reason": "Spam and an unrelated promotional-sounding title do not indicate a duplicate technical issue.", "right": "issue:44937"}, {"accept": false, "left": "issue:44573", "reason": "No concrete overlap; the titles suggest unrelated spam/promotional content.", "right": "issue:44937"}], "summary": "These are all low-signal, likely unrelated closed issues; most are spam or generic content, and none provide enough evidence to merge as duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44371|issue:44572", "issue:44371|issue:44573", "issue:44371|issue:44418", "issue:44371|issue:44937", "issue:44418|issue:44572", "issue:44418|issue:44573", "issue:44572|issue:44573", "issue:44572|issue:44937", "issue:44573|issue:44937"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 1080, "estimated_input_tokens": 412, "item_count": 2, "node_count": 2, "serialized_chars": 1646, "soft_pair_count": 1}, "cached_at": "2026-04-14T16:56:19Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", 
"hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "b30d11fe36dc558e20432916bc564caef464d61d3317f1e46022a35b69e34017", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:45145", "issue:45151"], "result": {"analyst_result": {"best_issue_reason": "#45145 is the strongest issue to keep as the cluster representative for now due to its inbound reference; treat it as the provisional canonical issue rather than a proven duplicate match.", "best_pr_reason": null, "canonical_issue_reason": "#45145 is the better representative among the pair because it has the only inbound reference, but the evidence is not strong enough to assert a true duplicate without body-level confirmation.", "canonical_pr_reason": null, "confidence": 0.72, "soft_edge_verdicts": [{"accept": false, "left": "issue:45145", "reason": "Exact title match and soft similarity are suggestive, but there is no body or diff evidence here to confirm the same underlying issue. I would keep this as a possible duplicate, not a confirmed one.", "right": "issue:45151"}], "summary": "These issues share the exact title and have moderate soft similarity, but the packet does not include body text or other strong linkage evidence. They look related and possibly duplicative, but not confirmed as the same underlying report."}, "error_kind": null, "error_message": null, "evaluator_result": {"accept": false, "feedback": "The packet supports that these two closed issues have the same title and moderate soft similarity, but it does not provide their bodies, so claims like \u201csame content,\u201d \u201chighly similar body,\u201d or \u201cclear duplicate pair\u201d are too strong. 
The canonical choice of #45145 is plausible because it has one inbound reference, but the duplicate/identity conclusion should be stated more conservatively unless body evidence is available."}, "evaluator_used": true, "retried": true}, "soft_pairs": ["issue:45145|issue:45151"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 3212, "estimated_input_tokens": 1478, "item_count": 2, "node_count": 2, "serialized_chars": 5909, "soft_pair_count": 1}, "cached_at": "2026-04-14T16:57:58Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "8ea55604793af7d8cc77ce919d4894a1aca4932f2ec171d62ee211c733f2e414", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:26617", "pull_request:29095"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "#29095 is the stronger representative PR in this cluster because it is closed and includes a broader integration footprint (docs, README updates, model docs, tokenizer/output artifacts), but it is still not duplicate-equivalent to #26617.", "canonical_issue_reason": null, "canonical_pr_reason": "No issue target here; if a representative PR is needed, #29095 is the better canonical choice because it is the more complete, closed end-to-end model-support PR, while #26617 is an open WIP for a different model.", "confidence": 0.98, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:26617", "reason": "Different models and different underlying changes (CharacterBERT vs RWKV5). 
Shared auto/config/docs files are expected for any model addition and do not indicate the same bug or change.", "right": "pull_request:29095"}], "summary": "These are two unrelated model-addition PRs: one adds CharacterBERT, the other adds RWKV5. They share some generic Transformers integration files, but they target different models and code paths, so they should not be treated as duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:26617|pull_request:29095"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 20080, "estimated_input_tokens": 9912, "item_count": 18, "node_count": 18, "serialized_chars": 39645, "soft_pair_count": 12}, "cached_at": "2026-04-14T16:58:58Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "7065f103d1bb271abefcd70441deda121e351028315caf008197dd40c26ba8f0", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43242", "pull_request:43251", "pull_request:43254", "pull_request:43303", "pull_request:43524", "pull_request:43989", "pull_request:44080", "pull_request:44118", "pull_request:44192", "pull_request:44193", "pull_request:44341", "pull_request:44388", "pull_request:44400", "pull_request:44429", "pull_request:44470", "pull_request:44690", "pull_request:45132", "pull_request:45315"], "result": {"analyst_result": {"best_issue_reason": "Not applicable: there is no issue artifact spanning the full mixed cluster.", "best_pr_reason": "Not applicable: the cluster does not have one best PR overall; each subgroup has its own representative fix.", "canonical_issue_reason": "No single issue is present, and the items split into multiple 
unrelated bug threads rather than one coherent duplicate cluster.", "canonical_pr_reason": "No single PR can be the canonical representative for the whole set because the PRs cover several separate fixes in different code paths.", "confidence": 0.97, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:44470", "reason": "Same GLM-MOE-DSA indexer bug; both add the missing ReLU in the same code path and would merge cleanly as one fix.", "right": "pull_request:44690"}, {"accept": true, "left": "pull_request:44388", "reason": "Same loading_report non-TTY ANSI-style bug; both change the same formatting path and are alternative implementations of the same fix.", "right": "pull_request:44429"}, {"accept": true, "left": "pull_request:45132", "reason": "Same MoE router softmaxing bug; 45315 is a broader version of the same concrete change across more models.", "right": "pull_request:45315"}, {"accept": true, "left": "pull_request:43242", "reason": "Both fix fixed_cross_entropy dropping supported cross_entropy kwargs; they target the same underlying API-forwarding bug.", "right": "pull_request:43251"}, {"accept": true, "left": "pull_request:44080", "reason": "Both address ModelOutput fields not syncing into the mapping when a previously-None dataclass field is assigned later.", "right": "pull_request:44118"}, {"accept": true, "left": "pull_request:44341", "reason": "Same loading_report ANSI handling issue when stdout is not a TTY; same file, same bug, same output path.", "right": "pull_request:44388"}, {"accept": true, "left": "pull_request:43989", "reason": "Both fix the AutoVideoProcessor class lookup crash when torchvision is unavailable by skipping None extractor mappings.", "right": "pull_request:44193"}, {"accept": true, "left": "pull_request:44341", "reason": "Same non-TTY formatting bug in loading_report; both PRs modify the same ANSI-style emission logic.", "right": "pull_request:44429"}, {"accept": true, "left": "pull_request:43303", "reason": "Same 
DeepSpeed weight-conversion path; 43524 is a follow-up refinement to the helper introduced in 43303 for the same state-dict conversion problem.", "right": "pull_request:43524"}, {"accept": true, "left": "pull_request:44118", "reason": "Both fix the same ModelOutput key-sync bug for previously-None dataclass fields, using different but equivalent implementation strategies.", "right": "pull_request:44192"}, {"accept": true, "left": "pull_request:43251", "reason": "Same fixed_cross_entropy kwargs-forwarding issue; both add support for weight/label_smoothing in the same loss helper.", "right": "pull_request:43254"}, {"accept": true, "left": "pull_request:44341", "reason": "Same loading_report non-TTY ANSI formatting bug; both are direct fixes to the same helper behavior.", "right": "pull_request:44400"}], "summary": "This is a mixed cluster made up of several distinct duplicate groups: cross-entropy kwarg forwarding, ModelOutput key syncing, loading_report ANSI handling, AutoVideoProcessor lookup, DeepSpeed weight conversion, GLM-MOE-DSA ReLU insertion, and MoE router softmax fixes. 
There is no single canonical artifact for the whole set, but several soft edges are true duplicates within their subgroup."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44470|pull_request:44690", "pull_request:44388|pull_request:44429", "pull_request:45132|pull_request:45315", "pull_request:43242|pull_request:43251", "pull_request:44080|pull_request:44118", "pull_request:44341|pull_request:44388", "pull_request:43989|pull_request:44193", "pull_request:44341|pull_request:44429", "pull_request:43303|pull_request:43524", "pull_request:44118|pull_request:44192", "pull_request:43251|pull_request:43254", "pull_request:44341|pull_request:44400"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 18968, "estimated_input_tokens": 9356, "item_count": 17, "node_count": 17, "serialized_chars": 37421, "soft_pair_count": 10}, "cached_at": "2026-04-14T16:59:25Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "c4abe4832594bcf5f0bfcea7c969bace42c54f3f5538f869d35497f118a590ea", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43242", "pull_request:43254", "pull_request:43303", "pull_request:43524", "pull_request:43926", "pull_request:44080", "pull_request:44192", "pull_request:44194", "pull_request:44203", "pull_request:44334", "pull_request:44470", "pull_request:44564", "pull_request:44663", "pull_request:44690", "pull_request:45111", "pull_request:45131", "pull_request:45132"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 45131 is the strongest single PR in the set because it fixes the central router-logits/probabilities mistake across several 
model families and is already merged.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 45131 is the best representative overall: it is merged, addresses the core MoE router bug, and covers multiple concrete model implementations with tests.", "confidence": 0.79, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44334", "reason": "Same area and same issue target, but these are duplicate-style iterations on the CLI fix rather than the same concrete change.", "right": "pull_request:44663"}, {"accept": false, "left": "pull_request:44194", "reason": "Both touch optimizer args, but they implement different strategies and scopes; not the same exact fix.", "right": "pull_request:44203"}, {"accept": false, "left": "pull_request:44080", "reason": "Both concern ModelOutput assignment, but the code paths and fixes differ enough that they are not the same underlying change.", "right": "pull_request:44192"}, {"accept": false, "left": "pull_request:43242", "reason": "These are two distinct patches for fixed_cross_entropy: one forwards kwargs generically, the other exposes specific parameters in the signature.", "right": "pull_request:43254"}, {"accept": false, "left": "pull_request:44564", "reason": "Same model family and same symptom, but the later PR is a narrower ReLU implementation tweak, not the same concrete change as the earlier fix.", "right": "pull_request:44690"}, {"accept": false, "left": "pull_request:43303", "reason": "Both are DeepSpeed weight-conversion work, but 43926 is a later correction to the earlier machinery, so they are related follow-ups rather than duplicates.", "right": "pull_request:43926"}, {"accept": false, "left": "pull_request:44470", "reason": "The two GLM-MoE-DSA PRs overlap in topic, but one is the first ReLU addition and the other is a later implementation refinement; not mergeable as one PR.", "right": "pull_request:44564"}, {"accept": false, "left": "pull_request:43524", "reason": "Same file and issue target, but they 
address different stages of the DeepSpeed conversion logic and are not the same underlying bug fix.", "right": "pull_request:43926"}, {"accept": false, "left": "pull_request:45111", "reason": "Both touch MoE router probabilities, but 45111 targets a broader set of routers while 45132 is a narrower subset; overlapping topic is not enough for duplicate status.", "right": "pull_request:45132"}, {"accept": false, "left": "pull_request:45131", "reason": "45131 is the broader, merged multi-model fix; 45132 is a narrower variant and not the same concrete change.", "right": "pull_request:45132"}], "summary": "The items split into several unrelated PR threads, with no true duplicates among the soft-edge candidates. Most shared-issue pairs are alternative or follow-up implementations, and the two MoE router PRs overlap conceptually but fix different scopes/models and shouldn\u2019t be merged as duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44334|pull_request:44663", "pull_request:44194|pull_request:44203", "pull_request:44080|pull_request:44192", "pull_request:43242|pull_request:43254", "pull_request:44564|pull_request:44690", "pull_request:43303|pull_request:43926", "pull_request:44470|pull_request:44564", "pull_request:43524|pull_request:43926", "pull_request:45111|pull_request:45132", "pull_request:45131|pull_request:45132"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 20386, "estimated_input_tokens": 10065, "item_count": 17, "node_count": 17, "serialized_chars": 40257, "soft_pair_count": 15}, "cached_at": "2026-04-14T16:59:59Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", 
"prepared_review_unit_hash": "b34d2ad38932240bd52cb932e47156e50c5eb11df519e5fab8bc911f43f87dfd", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:42781", "pull_request:43400", "pull_request:43403", "pull_request:43453", "pull_request:43580", "pull_request:43998", "pull_request:44000", "pull_request:44001", "pull_request:44002", "pull_request:44003", "pull_request:44004", "pull_request:44028", "pull_request:44439", "pull_request:44848", "pull_request:44934", "pull_request:45048", "pull_request:45053"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #43400 is the strongest standalone representative in this cluster because it is a complete, merged feature addition with implementation and tests; the other PRs are either unrelated test fixes or separate model refactors.", "canonical_issue_reason": null, "canonical_pr_reason": "No issues are present. Among the PRs, #43400 is the best representative of the only clearly related subthread (VibeVoice acoustic tokenizer): it is merged, substantial, and includes code, docs, and tests for a coherent feature addition.", "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43453", "reason": "Both are test-fix PRs, but they target different models and different problems: missing pad_token_id vs speculative generation expectations. No shared code path.", "right": "pull_request:44934"}, {"accept": false, "left": "pull_request:43998", "reason": "Same broad theme of output/refactor work, but different models and different mechanics. 
Not the same underlying change or bug.", "right": "pull_request:44003"}, {"accept": false, "left": "pull_request:43453", "reason": "Unrelated integration-test adjustments for different models; one fixes pad_token_id setup, the other changes generation expectations/sample behavior.", "right": "pull_request:45048"}, {"accept": false, "left": "pull_request:44001", "reason": "Both touch output-tracing style code, but for different architectures and code paths (UnivNet vs Mamba/FalconMamba). Too broad to merge as duplicates.", "right": "pull_request:44003"}, {"accept": false, "left": "pull_request:44002", "reason": "Different model families and different forward/output handling changes; not the same fix.", "right": "pull_request:44003"}, {"accept": false, "left": "pull_request:44000", "reason": "Both are refactors around model outputs, but they affect different models and forward signatures. No concrete shared bug.", "right": "pull_request:44003"}, {"accept": false, "left": "pull_request:44003", "reason": "Both are output-tracing related, but one is Mamba and the other is SuperPoint; separate model-specific changes.", "right": "pull_request:44028"}, {"accept": false, "left": "pull_request:43403", "reason": "Same test file, but the fixes are different: one handles missing image_sizes, the other disables torch.export. 
They are not the same bug.", "right": "pull_request:43580"}, {"accept": false, "left": "pull_request:44848", "reason": "Different subsystems entirely: generation meta-device handling vs Qwen2/T5 integration-test expectations.", "right": "pull_request:44934"}, {"accept": false, "left": "pull_request:44934", "reason": "Both are test-related, but for different models and different failures; no shared concrete bug or code path.", "right": "pull_request:45053"}, {"accept": false, "left": "pull_request:44003", "reason": "Same general refactor label, but they modify different model implementations and different output contracts.", "right": "pull_request:44004"}, {"accept": false, "left": "pull_request:44000", "reason": "Different models and different output/refactor changes; only superficial similarity in title.", "right": "pull_request:44004"}, {"accept": false, "left": "pull_request:44439", "reason": "Separate integration-test fixes for different models with no shared underlying issue.", "right": "pull_request:44934"}, {"accept": false, "left": "pull_request:42781", "reason": "Related VibeVoice family work, but not the same change: #42781 adds the main realtime model, while #43400 adds the acoustic tokenizer component. 
They could coexist, but they are not duplicates.", "right": "pull_request:43400"}], "summary": "The cluster is mostly a set of unrelated PRs that only share superficial wording like \u201crefactor output tracing\u201d or \u201cfix failing integration test.\u201d None of the soft pairs appear to be the same underlying bug/change; at most they are adjacent work in the same model family or the same test file."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43453|pull_request:44934", "pull_request:43998|pull_request:44003", "pull_request:43453|pull_request:45048", "pull_request:44001|pull_request:44003", "pull_request:44002|pull_request:44003", "pull_request:44000|pull_request:44003", "pull_request:44003|pull_request:44028", "pull_request:43403|pull_request:43580", "pull_request:44848|pull_request:44934", "pull_request:44934|pull_request:45053", "pull_request:44003|pull_request:44004", "pull_request:44000|pull_request:44004", "pull_request:44439|pull_request:44934", "pull_request:42781|pull_request:43400", "pull_request:43453|pull_request:44439"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 22762, "estimated_input_tokens": 11253, "item_count": 18, "node_count": 18, "serialized_chars": 45010, "soft_pair_count": 11}, "cached_at": "2026-04-14T17:00:33Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "9de99ef23ce512fc7b8ddc124607f9e1fdce317066029a610e5af61af89bdaf5", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:40546", "pull_request:43030", "pull_request:43349", "pull_request:43400", "pull_request:43438", "pull_request:43453", "pull_request:43764", 
"pull_request:44030", "pull_request:44095", "pull_request:44330", "pull_request:44456", "pull_request:44515", "pull_request:44519", "pull_request:44675", "pull_request:44759", "pull_request:44828", "pull_request:44848", "pull_request:45336"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "pull_request:44848 is the best single representative: it is a substantive merged fix with concrete code-path impact, clear title, and meaningful discussion/activity. It is still not a duplicate of the rest, but it is the most canonical-looking PR in this set.", "canonical_issue_reason": null, "canonical_pr_reason": "No issue in this cluster looks like a clear canonical duplicate target; the items are mostly independent PRs. If forced to pick a representative PR, 44848 is the strongest standalone bug-fix/change candidate because it contains a concrete runtime fix in generation/utils and a targeted model update.", "confidence": 0.89, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44759", "reason": "Both are part of a broad cache_position cleanup across different model sets, but they affect different files/models and are separate batched refactors, not the same concrete code-path bug.", "right": "pull_request:44828"}, {"accept": false, "left": "pull_request:43453", "reason": "One fixes missing pad_token_id attributes across configs; the other fixes generation device handling/meta-device behavior. Different bugs and unrelated code paths.", "right": "pull_request:44848"}, {"accept": false, "left": "pull_request:43349", "reason": "Finegrained FP8 device-sync cleanup and batched_mm CPU performance are different performance issues in different subsystems.", "right": "pull_request:43438"}, {"accept": false, "left": "pull_request:44519", "reason": "Marian integration test adjustment is a test expectation fix, while 44848 is a generation/runtime bug fix. 
Not the same underlying change.", "right": "pull_request:44848"}, {"accept": false, "left": "pull_request:40546", "reason": "Both are VibeVoice-related, but one implements the main model and the other adds the acoustic tokenizer. Different components and different changes.", "right": "pull_request:43400"}, {"accept": false, "left": "pull_request:44456", "reason": "DepthPro test stabilization and Qwen3 Omni MoE generation/device handling are unrelated.", "right": "pull_request:44848"}, {"accept": false, "left": "pull_request:44515", "reason": "GPTNeo generation test fix is unrelated to the Qwen3 Omni MoE generation/meta-device fix.", "right": "pull_request:44848"}, {"accept": false, "left": "pull_request:44095", "reason": "Both touch loading/initialization logic, but one fixes general model loading initialization and the other removes shared weights in Gemma4. Different concrete problems.", "right": "pull_request:45336"}, {"accept": false, "left": "pull_request:43030", "reason": "Both are ModernBERT attention-related, but one refactors the attention interface while the other changes the default attention implementation. Related area, but not the same duplicate fix.", "right": "pull_request:43764"}, {"accept": false, "left": "pull_request:44330", "reason": "Both remove cache_position from many models, but they are separate broad sweeps across different model groups rather than one identical code-path fix.", "right": "pull_request:44759"}, {"accept": false, "left": "pull_request:44030", "reason": "DPR output-tracing refactor and continuous batching docs/config cleanup are unrelated.", "right": "pull_request:44675"}], "summary": "The cluster is mostly a grab-bag of unrelated PRs: model additions, doc/test fixes, and several separate runtime refactors. 
A few pairs are superficially similar (especially the cache_position removals), but they still look like separate batched changes rather than the same duplicate fix."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44759|pull_request:44828", "pull_request:43453|pull_request:44848", "pull_request:43349|pull_request:43438", "pull_request:44519|pull_request:44848", "pull_request:40546|pull_request:43400", "pull_request:44456|pull_request:44848", "pull_request:44515|pull_request:44848", "pull_request:44095|pull_request:45336", "pull_request:43030|pull_request:43764", "pull_request:44330|pull_request:44759", "pull_request:44030|pull_request:44675"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 22928, "estimated_input_tokens": 11336, "item_count": 18, "node_count": 18, "serialized_chars": 45343, "soft_pair_count": 11}, "cached_at": "2026-04-14T17:01:01Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "749bbd6276ca0bd79269f63fe71bafb39bcd822577c3d3879de014b28cb40003", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:40546", "pull_request:42781", "pull_request:43030", "pull_request:43426", "pull_request:43453", "pull_request:43672", "pull_request:44000", "pull_request:44002", "pull_request:44029", "pull_request:44414", "pull_request:44527", "pull_request:44595", "pull_request:44662", "pull_request:44675", "pull_request:45212", "pull_request:45214", "pull_request:45340", "pull_request:45401"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 45401 is the strongest standalone candidate because it has an explicit issue target, a focused 
scope, and is not just a doc/refactor-only change.", "canonical_issue_reason": null, "canonical_pr_reason": "No single canonical PR truly represents the cluster because the items are thematically unrelated; if forced to choose one, PR 45401 is the most self-contained and explicitly issue-linked model-addition PR.", "confidence": 0.95, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44662", "reason": "Both add different vision-language models (PenguinVL vs Voxtral TTS). They share scaffolding files, but not the same code-path problem or mergeable change.", "right": "pull_request:45401"}, {"accept": false, "left": "pull_request:43030", "reason": "Different fixes in different models: ModernBERT attention interface refactor vs PE Audio logit device placement bug. No shared underlying issue.", "right": "pull_request:43672"}, {"accept": false, "left": "pull_request:40546", "reason": "Both concern VibeVoice, but one adds the base implementation and the other adds realtime/tokenizer support. These are separate features, not one duplicate fix.", "right": "pull_request:42781"}, {"accept": false, "left": "pull_request:44000", "reason": "Different model refactors in unrelated components; shared output-tracing/docs style is too broad to indicate the same change.", "right": "pull_request:44675"}, {"accept": false, "left": "pull_request:44002", "reason": "UperNet output-tracing refactor and docs/config cleanup are unrelated changes despite both touching model APIs.", "right": "pull_request:44675"}, {"accept": false, "left": "pull_request:44414", "reason": "Both touch loading/conversion infrastructure, but one reduces tqdm verbosity during loading while the other fixes conversion mappings. 
Separate bugs.", "right": "pull_request:45340"}, {"accept": false, "left": "pull_request:44029", "reason": "RWKV output-tracing refactor and a docs/config-only change in generation config are not the same underlying issue.", "right": "pull_request:44675"}, {"accept": false, "left": "pull_request:43453", "reason": "One fixes missing pad_token_id attributes across configs; the other changes MusicgenStereo test setup. Not the same bug.", "right": "pull_request:44527"}, {"accept": false, "left": "pull_request:45212", "reason": "Both are audio-related test/model fixes, but they address different models and different failure modes (fixture/device test support vs position embedding device placement).", "right": "pull_request:45214"}, {"accept": false, "left": "pull_request:43426", "reason": "Tokenizers cleanup behavior and VLM conversion mapping fixes are unrelated changes with no common code-path.", "right": "pull_request:45340"}, {"accept": false, "left": "pull_request:44595", "reason": "Both are model-addition PRs, but for different models (CHMv2 vs Voxtral TTS). They cannot plausibly be merged as one duplicate change.", "right": "pull_request:45401"}], "summary": "This cluster is not a duplicate set: it mixes unrelated model additions, documentation-only changes, and independent bugfix/refactor PRs across different subsystems. 
No pair looks like the same underlying change or fix, so all soft edges should be rejected."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44662|pull_request:45401", "pull_request:43030|pull_request:43672", "pull_request:40546|pull_request:42781", "pull_request:44000|pull_request:44675", "pull_request:44002|pull_request:44675", "pull_request:44414|pull_request:45340", "pull_request:44029|pull_request:44675", "pull_request:43453|pull_request:44527", "pull_request:45212|pull_request:45214", "pull_request:43426|pull_request:45340", "pull_request:44595|pull_request:45401"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 20858, "estimated_input_tokens": 10301, "item_count": 18, "node_count": 18, "serialized_chars": 41203, "soft_pair_count": 11}, "cached_at": "2026-04-14T17:01:37Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "5670dbbfab55d7ebc1d794fcbf0dd82d28ede1b963e7d13053882bf9290e065d", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43339", "pull_request:43426", "pull_request:43453", "pull_request:43464", "pull_request:43592", "pull_request:43615", "pull_request:44001", "pull_request:44095", "pull_request:44229", "pull_request:44414", "pull_request:44497", "pull_request:44662", "pull_request:44675", "pull_request:44801", "pull_request:44934", "pull_request:45044", "pull_request:45075", "pull_request:45340"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "No PR is meaningfully central to the cluster. 
If forced, 45340 is the broadest infra-oriented change, but it still does not represent the rest of the set.", "canonical_issue_reason": null, "canonical_pr_reason": "No single PR is a good canonical representative; the items span unrelated changes across different models, docs, tests, and core loading/tokenization behavior.", "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43453", "reason": "Different models and different fixes: config default values for several architectures vs a MarkupLM integration test dtype adjustment.", "right": "pull_request:43464"}, {"accept": false, "left": "pull_request:44095", "reason": "Both touch modeling_utils, but they address different concrete bugs: remote-code weight initialization vs tie-weights behavior when both weights already exist.", "right": "pull_request:44497"}, {"accept": false, "left": "pull_request:44001", "reason": "UnivNet output tracing refactor is unrelated to docs-only updates for continuous batching configuration.", "right": "pull_request:44675"}, {"accept": false, "left": "pull_request:44662", "reason": "Both are model-implementation PRs, but they add different models with different codepaths and cannot plausibly be merged as one fix.", "right": "pull_request:45075"}, {"accept": false, "left": "pull_request:43426", "reason": "Tokenization cleanup behavior and model-loading tqdm verbosity are unrelated changes.", "right": "pull_request:44414"}, {"accept": false, "left": "pull_request:45044", "reason": "Janus image-generation bugfix and VLM conversion-mapping updates are separate subsystems with no shared concrete defect.", "right": "pull_request:45340"}, {"accept": false, "left": "pull_request:43339", "reason": "Both add XPU test expectations, but for different models and different outputs; this is not the same underlying bug or change.", "right": "pull_request:43615"}, {"accept": false, "left": "pull_request:44229", "reason": "A docstring backtick fix is unrelated to 
continuous batching docs updates.", "right": "pull_request:44675"}, {"accept": false, "left": "pull_request:43592", "reason": "Configuration default-value fixes across models do not match conversion-mapping changes for VLMs.", "right": "pull_request:45340"}, {"accept": false, "left": "pull_request:44801", "reason": "Tokenizer-class hub mapping fixes for DeepSeek/ModernBERT are unrelated to conversion-mapping changes for VLMs.", "right": "pull_request:45340"}, {"accept": false, "left": "pull_request:43464", "reason": "These are separate integration-test fixes for different models and different failure modes.", "right": "pull_request:44934"}], "summary": "The cluster is heterogeneous: model/test expectation tweaks, docs, tokenizer behavior, config defaults, loading internals, and two separate model-addition PRs. I do not see any pair that fixes the same concrete bug or change, so no soft edges should be merged."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43453|pull_request:43464", "pull_request:44095|pull_request:44497", "pull_request:44001|pull_request:44675", "pull_request:44662|pull_request:45075", "pull_request:43426|pull_request:44414", "pull_request:45044|pull_request:45340", "pull_request:43339|pull_request:43615", "pull_request:44229|pull_request:44675", "pull_request:43592|pull_request:45340", "pull_request:44801|pull_request:45340", "pull_request:43464|pull_request:44934"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 19212, "estimated_input_tokens": 9478, "item_count": 18, "node_count": 18, "serialized_chars": 37911, "soft_pair_count": 11}, "cached_at": "2026-04-14T17:02:13Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": 
"gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "7c497ee8efc52980b45cc450bbb62189a0032a7df54fcbc6c27cca5660d94033", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43030", "pull_request:43324", "pull_request:43426", "pull_request:43579", "pull_request:43614", "pull_request:43615", "pull_request:43821", "pull_request:43861", "pull_request:43938", "pull_request:43956", "pull_request:44037", "pull_request:44321", "pull_request:44353", "pull_request:44428", "pull_request:44482", "pull_request:44536", "pull_request:44801", "pull_request:45214"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "44037 is the best overall PR in this set because it is a merged, substantive code fix to the real code path rather than a test-only expectation update or skip-only change.", "canonical_issue_reason": null, "canonical_pr_reason": "44037 is the strongest representative: it is merged, touches the core qwen3_vl_moe conversion mapping, and contains the more complete fix/validation for that concrete bug.", "confidence": 0.9, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:43956", "reason": "Both address the same qwen3_vl_moe weight-mapping bug in conversion_mapping/tests; 44037 is a more complete follow-up to the same underlying fix.", "right": "pull_request:44037"}, {"accept": false, "left": "pull_request:43324", "reason": "Both are test expectation updates for different models on XPU; same theme, but not the same bug or change.", "right": "pull_request:44321"}, {"accept": false, "left": "pull_request:43614", "reason": "Different models and different fixes; both are XPU-related test/model adjustments, but not the same underlying change.", "right": "pull_request:44353"}, {"accept": false, "left": "pull_request:43030", "reason": "ModernBERT attention refactor/docs versus tokenizer-class hub metadata fix; these are separate changes and code paths.", "right": "pull_request:44801"}, {"accept": false, "left": 
"pull_request:43579", "reason": "Unrelated models and changes: solar_open test expectations versus tensor-parallel skip logic.", "right": "pull_request:44536"}, {"accept": true, "left": "pull_request:43821", "reason": "Exact same PEFT typo fix in the same file and code path; 43861 just adds an explicit issue link.", "right": "pull_request:43861"}, {"accept": false, "left": "pull_request:43426", "reason": "Tokenizer backend cleanup behavior versus tokenizer class mapping on the hub are different bugs and fixes.", "right": "pull_request:44801"}, {"accept": false, "left": "pull_request:43615", "reason": "Different models and unrelated test/output fixes; only the broad XPU/testing context overlaps.", "right": "pull_request:45214"}, {"accept": false, "left": "pull_request:43614", "reason": "DiffLlama XPU contiguity fix and Higgs audio XPU expectations are separate model-specific issues.", "right": "pull_request:44482"}, {"accept": false, "left": "pull_request:43615", "reason": "Different model test expectation updates; they do not fix the same concrete problem.", "right": "pull_request:43938"}, {"accept": false, "left": "pull_request:43614", "reason": "Different models and different failure modes; one is a code-path fix, the other is fixture/test expectation work.", "right": "pull_request:44428"}], "summary": "The cluster is mostly a collection of unrelated model-specific test/support PRs. 
The only strong duplicate pair is the PEFT quantization typo fix, and the qwen3_vl_moe mapping PRs are closely related enough to treat as the same underlying fix."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43956|pull_request:44037", "pull_request:43324|pull_request:44321", "pull_request:43614|pull_request:44353", "pull_request:43030|pull_request:44801", "pull_request:43579|pull_request:44536", "pull_request:43821|pull_request:43861", "pull_request:43426|pull_request:44801", "pull_request:43615|pull_request:45214", "pull_request:43614|pull_request:44482", "pull_request:43615|pull_request:43938", "pull_request:43614|pull_request:44428"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 18832, "estimated_input_tokens": 9288, "item_count": 18, "node_count": 18, "serialized_chars": 37150, "soft_pair_count": 10}, "cached_at": "2026-04-14T17:03:00Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "ae629e812a0b8800e824bb3b35a5b14980453fab314a0e6581430f638a27dae4", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:42802", "pull_request:43339", "pull_request:43426", "pull_request:43464", "pull_request:43614", "pull_request:43896", "pull_request:44236", "pull_request:44250", "pull_request:44285", "pull_request:44320", "pull_request:44456", "pull_request:44490", "pull_request:44502", "pull_request:44515", "pull_request:44733", "pull_request:45049", "pull_request:45212", "pull_request:45284"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "44320 is the strongest single PR candidate by scope and completeness: it adds a full model integration 
with docs, auto mappings, tests, and an explicit target issue. It is still not duplicative of the other PRs.", "canonical_issue_reason": null, "canonical_pr_reason": "No true canonical PR stands out because the items are unrelated. If one must be chosen, 44320 is the broadest substantive PR here, but it is a standalone model-addition feature, not a duplicate of the others.", "confidence": 0.95, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43614", "reason": "Different fixes: DiffLlama contiguous-input handling for XPU compile vs flash-attn kernel fallback plus Qwen2/Jamba test expectations.", "right": "pull_request:44733"}, {"accept": false, "left": "pull_request:43426", "reason": "Unrelated code paths: tokenizer cleanup behavior vs TrainingArguments `report_to=\"all\"` regression.", "right": "pull_request:44250"}, {"accept": false, "left": "pull_request:43896", "reason": "DAC expected-output updates vs a torch.mlu availability guard; different subsystems and bugs.", "right": "pull_request:44502"}, {"accept": false, "left": "pull_request:43896", "reason": "Both touch test expectations, but for different models and different hardware; not the same underlying bug.", "right": "pull_request:45284"}, {"accept": false, "left": "pull_request:43339", "reason": "Both are XPU-related test changes, but they target different models and different failure modes.", "right": "pull_request:45212"}, {"accept": false, "left": "pull_request:44236", "reason": "Deepspeed Zero3 init config fix vs RoPE-parameter kwargs handling; unrelated functionality.", "right": "pull_request:45049"}, {"accept": false, "left": "pull_request:44285", "reason": "Both add new model support, but VidEoMT and SAM3-LiteText are distinct models with separate codepaths and docs.", "right": "pull_request:44320"}, {"accept": false, "left": "pull_request:43464", "reason": "Similar integration-test symptom, but MarkupLM and DepthPro are different models with independent dtype fixes.", "right": 
"pull_request:44456"}, {"accept": false, "left": "pull_request:43464", "reason": "MarkupLM integration-test dtype fix vs GPTNeo generation test adjustment; unrelated model/test code.", "right": "pull_request:44515"}, {"accept": false, "left": "pull_request:42802", "reason": "Lasr disables flex attention due padding-mask incompatibility; EuroBERT fixes attention-mask device placement for model parallelism.", "right": "pull_request:44490"}], "summary": "This cluster is a grab bag of unrelated model fixes, test expectation updates, utility tweaks, and new model additions; none of the soft pairs look like the same underlying change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43614|pull_request:44733", "pull_request:43426|pull_request:44250", "pull_request:43896|pull_request:44502", "pull_request:43896|pull_request:45284", "pull_request:43339|pull_request:45212", "pull_request:44236|pull_request:45049", "pull_request:44285|pull_request:44320", "pull_request:43464|pull_request:44456", "pull_request:43464|pull_request:44515", "pull_request:42802|pull_request:44490"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 18476, "estimated_input_tokens": 9110, "item_count": 17, "node_count": 17, "serialized_chars": 36439, "soft_pair_count": 10}, "cached_at": "2026-04-14T17:03:33Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "cc3e8683b9146a9bd74ac0a6eebb92195b37f7775bf5f21e93df0414d35cc56c", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:42230", "pull_request:43578", "pull_request:43614", "pull_request:43635", "pull_request:43896", "pull_request:44001", 
"pull_request:44002", "pull_request:44025", "pull_request:44026", "pull_request:44033", "pull_request:44229", "pull_request:44235", "pull_request:44321", "pull_request:44414", "pull_request:44801", "pull_request:44833", "pull_request:45214"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": "No single canonical PR: the items split into separate, unrelated changes across different models, tests, and CI workflows, so there is no duplicate center to anchor the cluster.", "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44321", "reason": "Different models and failures: VoxtralRealtime test skips vs a Cohere ASR beam-search device fix. Not the same bug or change.", "right": "pull_request:45214"}, {"accept": false, "left": "pull_request:44235", "reason": "Both touch tokenizer auto-registration, but they fix different model mappings and different hub issues; not one concrete change.", "right": "pull_request:44801"}, {"accept": false, "left": "pull_request:44001", "reason": "Both are refactors in different model families; same pattern of can_return_tuple, but not the same code-path problem.", "right": "pull_request:44229"}, {"accept": false, "left": "pull_request:44002", "reason": "Different models and forward signatures; similar refactor style only, no shared underlying bug.", "right": "pull_request:44229"}, {"accept": false, "left": "pull_request:44025", "reason": "Depth Anything and AnyToAny are unrelated model changes; no common duplicate change beyond broad tracing cleanup.", "right": "pull_request:44229"}, {"accept": false, "left": "pull_request:44026", "reason": "VisionEncoderDecoder refactor is separate from AnyToAny docstring/signature cleanup; not mergeable as one fix.", "right": "pull_request:44229"}, {"accept": false, "left": "pull_request:42230", "reason": "Both mention XPU, but one changes attention-mask handling and the other adds 
contiguity in DiffLlama; distinct bugs and code paths.", "right": "pull_request:43614"}, {"accept": false, "left": "pull_request:43896", "reason": "DAC expected-output updates and modular converter regeneration are unrelated maintenance changes.", "right": "pull_request:44833"}, {"accept": false, "left": "pull_request:43635", "reason": "Both are CI/comment workflow tweaks, but they address different feedback paths and concerns; not the same concrete change.", "right": "pull_request:44033"}, {"accept": false, "left": "pull_request:43578", "reason": "Youtu test repo alignment and model-loading tqdm verbosity are unrelated changes.", "right": "pull_request:44414"}], "summary": "This cluster is heterogeneous: it contains unrelated model fixes, tokenizer metadata updates, CI/workflow changes, and test-only updates. The soft pairs are only loosely similar by subsystem or file pattern, not by the same underlying bug/change, so none should be merged as duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44321|pull_request:45214", "pull_request:44235|pull_request:44801", "pull_request:44001|pull_request:44229", "pull_request:44002|pull_request:44229", "pull_request:44025|pull_request:44229", "pull_request:44026|pull_request:44229", "pull_request:42230|pull_request:43614", "pull_request:43896|pull_request:44833", "pull_request:43635|pull_request:44033", "pull_request:43578|pull_request:44414"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 18428, "estimated_input_tokens": 9086, "item_count": 17, "node_count": 17, "serialized_chars": 36342, "soft_pair_count": 10}, "cached_at": "2026-04-14T17:04:34Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": 
"gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "7d346a42aa43940e196c7c089ea2926d7bd814df741a88b4294ed92d888e244a", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:41912", "pull_request:42668", "pull_request:43426", "pull_request:43500", "pull_request:43553", "pull_request:43554", "pull_request:43565", "pull_request:43578", "pull_request:43580", "pull_request:43953", "pull_request:43956", "pull_request:44414", "pull_request:44417", "pull_request:45061", "pull_request:45075", "pull_request:45401", "pull_request:45433"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "No global best PR for duplicate triage; the items do not converge on one underlying bug or feature, so any single PR would be a poor representative of the cluster.", "canonical_issue_reason": null, "canonical_pr_reason": "No single canonical PR stands out: the cluster is heterogeneous, and the closest-looking pair (Qwen3-VL-MoE conversion) still appears to be different implementation approaches rather than one duplicate change.", "confidence": 0.92, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:45075", "reason": "Both add new model integrations, but they are for different models with different code and docs; same broad pattern is not enough for duplication.", "right": "pull_request:45401"}, {"accept": false, "left": "pull_request:43426", "reason": "Unrelated changes: tokenizer cleanup behavior versus a test repo/name update for Youtu.", "right": "pull_request:43578"}, {"accept": false, "left": "pull_request:43500", "reason": "Both touch bot/workflow-related files, but one is a test-file permission check hack and the other changes workflow permissions; not the same underlying change.", "right": "pull_request:43565"}, {"accept": false, "left": "pull_request:43500", "reason": "The first is a permission-check test tweak, while the second adds a new workflow file; different concrete changes.", "right": "pull_request:43553"}, 
{"accept": false, "left": "pull_request:43953", "reason": "Same model and file, but one redirects qwen3_vl_moe to qwen2_moe and removes its converters, while the other refines the dedicated qwen3_vl_moe weight converters; these are not the same fix.", "right": "pull_request:43956"}, {"accept": false, "left": "pull_request:41912", "reason": "Restore hidden-state dtype in T5-family modules versus skipping torch export tests for a different model; unrelated.", "right": "pull_request:43580"}, {"accept": false, "left": "pull_request:43500", "reason": "Same workflow area, but one is a bot permission check and the other changes style-bot behavior; not the same concrete bug/change.", "right": "pull_request:43554"}, {"accept": false, "left": "pull_request:42668", "reason": "Processor auto-loading robustness versus tqdm verbosity during model loading; different code paths and goals.", "right": "pull_request:44414"}, {"accept": false, "left": "pull_request:44414", "reason": "Model-loading progress logging versus flash-attn compatibility/deprecation utilities; unrelated.", "right": "pull_request:45061"}, {"accept": false, "left": "pull_request:44417", "reason": "Neuron kernel integration and SonicMoe integration are separate feature additions with different code paths and targets.", "right": "pull_request:45433"}], "summary": "Mostly unrelated PRs across different models, workflows, and integrations. 
The only superficially similar pair is the Qwen3-VL-MoE conversion work, but the patches are alternative fixes rather than the same concrete change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:45075|pull_request:45401", "pull_request:43426|pull_request:43578", "pull_request:43500|pull_request:43565", "pull_request:43500|pull_request:43553", "pull_request:43953|pull_request:43956", "pull_request:41912|pull_request:43580", "pull_request:43500|pull_request:43554", "pull_request:42668|pull_request:44414", "pull_request:44414|pull_request:45061", "pull_request:44417|pull_request:45433"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 19166, "estimated_input_tokens": 9455, "item_count": 17, "node_count": 17, "serialized_chars": 37819, "soft_pair_count": 10}, "cached_at": "2026-04-14T17:05:13Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "b124c7fc06a376f06dcb5572c1799c1660e91479de55e9e5e39fed9e50a60321", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43071", "pull_request:43403", "pull_request:43438", "pull_request:43615", "pull_request:44251", "pull_request:44285", "pull_request:44300", "pull_request:44412", "pull_request:44456", "pull_request:44482", "pull_request:44490", "pull_request:44497", "pull_request:44515", "pull_request:44519", "pull_request:44566", "pull_request:44675", "pull_request:45336"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "No best PR for deduplication purposes; any representative would be misleading because the cluster is heterogeneous and the soft similarities are mostly superficial (shared filenames 
or broad subsystem overlap).", "canonical_issue_reason": null, "canonical_pr_reason": "No single canonical PR: the cluster mixes unrelated model additions, test-only fixes, typing chores, and conversion logic changes, with no pair that clearly shares the same concrete code-path fix.", "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43615", "reason": "Different test fixes for different models (MixedInt8 GPT-2 vs Higgs Audio v2); same theme of expected outputs, but not the same bug or code path.", "right": "pull_request:44482"}, {"accept": false, "left": "pull_request:44251", "reason": "Both add new models/docs, but for completely different models (Jina-Embeddings-V3 vs VidEoMT) and different code changes.", "right": "pull_request:44285"}, {"accept": false, "left": "pull_request:43403", "reason": "Unrelated fixes: a LightOn OCR test helper change versus a EuroBERT model-parallelism bug fix.", "right": "pull_request:44490"}, {"accept": false, "left": "pull_request:44412", "reason": "Both touch typing infrastructure, but they address different scopes and problems (quantizers type checking vs extending typing into CLI/processing utilities).", "right": "pull_request:44566"}, {"accept": false, "left": "pull_request:44497", "reason": "Different weight-handling issues: generic tie-weights behavior vs Gemma4-specific shared-weight removal/loading behavior.", "right": "pull_request:45336"}, {"accept": false, "left": "pull_request:43071", "reason": "Both edit conversion mapping, but one is a Mixtral renaming cleanup while the other makes dynamic weight conversion recursive; not the same change.", "right": "pull_request:44300"}, {"accept": false, "left": "pull_request:43438", "reason": "Both are test-related, but they fix separate failures in different models and code paths.", "right": "pull_request:44456"}, {"accept": false, "left": "pull_request:43438", "reason": "Different failing generation tests for different models; no shared underlying 
bug.", "right": "pull_request:44515"}, {"accept": false, "left": "pull_request:43438", "reason": "One changes generation utils/docs for continuous batching, the other fixes a GPT-Neo test by constraining max_length; not the same issue.", "right": "pull_request:44675"}, {"accept": false, "left": "pull_request:43438", "reason": "Different integration/test fixes in different models; only broad test-maintenance similarity, not a common bug.", "right": "pull_request:44519"}], "summary": "The cluster is a set of unrelated pull requests loosely similar only by file area or general maintenance style; none of the soft pairs appear to fix the same underlying bug or implement the same change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43615|pull_request:44482", "pull_request:44251|pull_request:44285", "pull_request:43403|pull_request:44490", "pull_request:44412|pull_request:44566", "pull_request:44497|pull_request:45336", "pull_request:43071|pull_request:44300", "pull_request:43438|pull_request:44456", "pull_request:43438|pull_request:44515", "pull_request:43438|pull_request:44675", "pull_request:43438|pull_request:44519"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 22116, "estimated_input_tokens": 10930, "item_count": 18, "node_count": 18, "serialized_chars": 43718, "soft_pair_count": 10}, "cached_at": "2026-04-14T17:05:53Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "6650338953fcccb19534774f7dfa85c468dc0060af03fb79b3df701627d1b256", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:42230", "pull_request:43445", "pull_request:43710", "pull_request:43896", 
"pull_request:43938", "pull_request:44269", "pull_request:44321", "pull_request:44428", "pull_request:44490", "pull_request:44595", "pull_request:44602", "pull_request:44662", "pull_request:44733", "pull_request:44827", "pull_request:44828", "pull_request:45044", "pull_request:45204", "pull_request:45315"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 44828 is the strongest representative of the only true duplicate-like change in the cluster; it targets the same concrete refactor as PR 44602 and is broader in scope.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 44828 is the best canonical PR because it is part of the same repo-wide cache_position removal series and appears to be the later, more complete batch cleanup in that thread.", "confidence": 0.86, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:44602", "reason": "Same underlying refactor: removing cache_position from multiple model forwards/generation paths. These could plausibly be merged into one PR.", "right": "pull_request:44828"}, {"accept": false, "left": "pull_request:42230", "reason": "Both mention XPU, but one changes attention-mask logic for SDPA while the other only updates VibeVoice test fixtures; different bugs/changes.", "right": "pull_request:44428"}, {"accept": false, "left": "pull_request:42230", "reason": "Different fixes: XPU attention-mask handling versus flash-attention kernel fallback plus XPU test expectations. 
Not the same concrete bug.", "right": "pull_request:44733"}, {"accept": false, "left": "pull_request:43938", "reason": "Unrelated changes: exaone_moe test expectations versus skipping invalid VoxtralRealtime tests.", "right": "pull_request:44321"}, {"accept": false, "left": "pull_request:43896", "reason": "DAC expected-output update from CI settings is unrelated to the Mistral4 model/test fixes.", "right": "pull_request:44827"}, {"accept": false, "left": "pull_request:44595", "reason": "Both add model support, but for different models (CHMv2 vs PenguinVL) and different codepaths.", "right": "pull_request:44662"}, {"accept": false, "left": "pull_request:44321", "reason": "Different artifacts and bugs: VoxtralRealtime test skips versus EuroBERT model-parallel attention_mask device handling.", "right": "pull_request:44490"}, {"accept": false, "left": "pull_request:43710", "reason": "Docs-only changes for unrelated topics: model docs versus processor kwargs documentation.", "right": "pull_request:44269"}, {"accept": false, "left": "pull_request:43445", "reason": "Both involve MoE, but one is conversion/renaming mapping while the other changes router softmax behavior; not the same underlying fix.", "right": "pull_request:45315"}, {"accept": false, "left": "pull_request:45044", "reason": "Completely different bugs in different models: Janus generation handling versus VideoMT device mismatch.", "right": "pull_request:45204"}], "summary": "The cluster is mostly heterogeneous: docs additions, model-specific bug fixes, test expectation updates, and unrelated model implementations. 
The only clear duplicate-like thread is the cache_position cleanup across many models (PRs 44602 and 44828)."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44602|pull_request:44828", "pull_request:42230|pull_request:44428", "pull_request:42230|pull_request:44733", "pull_request:43938|pull_request:44321", "pull_request:43896|pull_request:44827", "pull_request:44595|pull_request:44662", "pull_request:44321|pull_request:44490", "pull_request:43710|pull_request:44269", "pull_request:43445|pull_request:45315", "pull_request:45044|pull_request:45204"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 18686, "estimated_input_tokens": 9215, "item_count": 17, "node_count": 17, "serialized_chars": 36857, "soft_pair_count": 10}, "cached_at": "2026-04-14T17:06:37Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "b4a944cbc4a7c65971300d88f749094676a6e8fc2deb93a5bf663915f67503c3", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43324", "pull_request:43907", "pull_request:43919", "pull_request:43936", "pull_request:44040", "pull_request:44051", "pull_request:44428", "pull_request:44482", "pull_request:44536", "pull_request:44602", "pull_request:44733", "pull_request:45044", "pull_request:45049", "pull_request:45209", "pull_request:45212", "pull_request:45214", "pull_request:45336"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "pull_request:43936 is the strongest standalone PR in the set: it fixes an actual device-handling bug in moonshine_streaming code and is backed by tests, making it more representative than expectation-only or purely 
refactor-oriented PRs.", "canonical_issue_reason": null, "canonical_pr_reason": "No clear issue canonical exists; if one PR must represent the cluster, pull_request:43936 is the best fit because it fixes a concrete runtime bug in model code and includes corresponding test updates.", "confidence": 0.95, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44482", "reason": "Both mention XPU-related test handling, but one updates Higgs Audio v2 expectations while the other changes tensor-parallel skip logic for CPU-only tests. Different code paths and different bugs.", "right": "pull_request:44536"}, {"accept": false, "left": "pull_request:45209", "reason": "These target unrelated models and failures: Nomic BERT device-suitability test adjustments versus Cohere ASR beam-search position-embedding handling. No shared underlying bug.", "right": "pull_request:45214"}, {"accept": false, "left": "pull_request:43324", "reason": "Both are XPU test-support PRs, but they touch different model suites and only adjust expected outputs/fixtures. Same broad theme, not the same concrete change.", "right": "pull_request:45212"}, {"accept": false, "left": "pull_request:43919", "reason": "Trainer gradient synchronization logic and RoPE config parsing are unrelated subsystems with distinct failure modes.", "right": "pull_request:45049"}, {"accept": false, "left": "pull_request:44040", "reason": "Same model family, but one fixes get_audio_features behavior while the other corrects a forward type hint. Different changes and not the same bug.", "right": "pull_request:44051"}, {"accept": false, "left": "pull_request:44602", "reason": "One removes cache_position across many models; the other removes shared weights in Gemma4. 
Different code-path problems.", "right": "pull_request:45336"}, {"accept": false, "left": "pull_request:44428", "reason": "VibeVoice tokenizer test fixtures and tensor-parallel CPU-only skip logic are unrelated; both are test maintenance, not one underlying defect.", "right": "pull_request:44536"}, {"accept": false, "left": "pull_request:44536", "reason": "The former only broadens CPU-only skip checks to XPU, while the latter fixes flash-attention kernel fallback and adds expectations for two models. Not mergeable as one fix.", "right": "pull_request:44733"}, {"accept": false, "left": "pull_request:43907", "reason": "Both concern image generation, but they are for different models and different defects: GLM Image test expectations versus Janus generation-mode/config handling.", "right": "pull_request:45044"}, {"accept": false, "left": "pull_request:43324", "reason": "Minimax XPU test expectations and Moonshine streaming device placement are separate model failures with no shared code path.", "right": "pull_request:43936"}], "summary": "The cluster is heterogeneous: mostly unrelated model-specific bug fixes and test expectation updates, with no true duplicate PR pairs. 
Several items are XPU/test-only changes, while others are isolated functional fixes in different models or utilities."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44482|pull_request:44536", "pull_request:45209|pull_request:45214", "pull_request:43324|pull_request:45212", "pull_request:43919|pull_request:45049", "pull_request:44040|pull_request:44051", "pull_request:44602|pull_request:45336", "pull_request:44428|pull_request:44536", "pull_request:44536|pull_request:44733", "pull_request:43907|pull_request:45044", "pull_request:43324|pull_request:43936"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 21834, "estimated_input_tokens": 10789, "item_count": 18, "node_count": 18, "serialized_chars": 43154, "soft_pair_count": 11}, "cached_at": "2026-04-14T17:07:12Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "add621da5c2df8154df4a83546b9bfd201432511c44e3e75d335f37b3d69c17d", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:41763", "pull_request:42668", "pull_request:43339", "pull_request:43488", "pull_request:43565", "pull_request:43592", "pull_request:43936", "pull_request:43999", "pull_request:44001", "pull_request:44002", "pull_request:44025", "pull_request:44026", "pull_request:44027", "pull_request:44325", "pull_request:44330", "pull_request:44412", "pull_request:45336", "pull_request:45425"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.94, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:42668", "reason": "Different subsystems 
and goals: processor loading robustness vs config default-value cleanup. Not the same code-path problem.", "right": "pull_request:43592"}, {"accept": false, "left": "pull_request:43488", "reason": "The first is an intentional bad-format test PR; the second changes workflow permissions and removes a workflow. No shared bug/change.", "right": "pull_request:43565"}, {"accept": false, "left": "pull_request:41763", "reason": "Both touch TimesFm 2.5, but one adds the model/docs and the other fixes an MLP bias bug later. Different changes, not duplicate fixes.", "right": "pull_request:44325"}, {"accept": false, "left": "pull_request:44412", "reason": "Both are typing-related, but one expands type checking to quantizers while the other updates modeling_utils typing support and path handling. Not the same fix.", "right": "pull_request:45425"}, {"accept": false, "left": "pull_request:43339", "reason": "Different models and different test/runtime issues. Shared test-maintenance theme only, not the same underlying bug.", "right": "pull_request:43936"}, {"accept": false, "left": "pull_request:43999", "reason": "Same refactor pattern, but applied to different model code paths. Not one concrete bug/change that could be merged as a single PR.", "right": "pull_request:44001"}, {"accept": false, "left": "pull_request:43999", "reason": "Different models and forward signatures; similar cleanup style but unrelated implementations.", "right": "pull_request:44002"}, {"accept": false, "left": "pull_request:43999", "reason": "Refactor-output-tracing changes in different models. No shared failing code path.", "right": "pull_request:44025"}, {"accept": false, "left": "pull_request:43999", "reason": "Different architectures and files; this is only a naming/theme match, not the same fix.", "right": "pull_request:44026"}, {"accept": false, "left": "pull_request:43999", "reason": "Separate model-specific refactors. 
They could live in one umbrella cleanup PR, but they are not the same underlying bug.", "right": "pull_request:44027"}, {"accept": false, "left": "pull_request:44330", "reason": "Completely different changes: cache_position cleanup across models versus Gemma4 shared-weight loading behavior.", "right": "pull_request:45336"}], "summary": "Loose similarity cluster, but the PRs are about different models, different fixes, or unrelated repo/typing chores. No soft edge looks like the same underlying bug or change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:42668|pull_request:43592", "pull_request:43488|pull_request:43565", "pull_request:41763|pull_request:44325", "pull_request:44412|pull_request:45425", "pull_request:43339|pull_request:43936", "pull_request:43999|pull_request:44001", "pull_request:43999|pull_request:44002", "pull_request:43999|pull_request:44025", "pull_request:43999|pull_request:44026", "pull_request:43999|pull_request:44027", "pull_request:44330|pull_request:45336"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 18788, "estimated_input_tokens": 9266, "item_count": 18, "node_count": 18, "serialized_chars": 37061, "soft_pair_count": 12}, "cached_at": "2026-04-14T17:07:50Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "c53b541fe8b76d7c87b90ded19cfa612fc74a17f7adf06657767bdf107396eee", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:42802", "pull_request:43341", "pull_request:43699", "pull_request:43759", "pull_request:43910", "pull_request:43936", "pull_request:44051", "pull_request:44082", "pull_request:44296", "pull_request:44353", 
"pull_request:44426", "pull_request:44482", "pull_request:44502", "pull_request:44801", "pull_request:45044", "pull_request:45049", "pull_request:45204", "pull_request:45214"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "No strong global best PR. 45204 is the clearest concrete runtime fix, but it is only one isolated device-mismatch change and does not represent the cluster.", "canonical_issue_reason": null, "canonical_pr_reason": "No canonical PR: the items span unrelated fixes across different models, tests, and utilities rather than one shared code path or bug.", "confidence": 0.99, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44051", "reason": "Different changes in different areas: Gemma3n type-hint cleanup vs tokenizer-class registration fixes for DeepSeek/ModernBERT.", "right": "pull_request:44801"}, {"accept": false, "left": "pull_request:45204", "reason": "Both are device-placement fixes, but for different models and different code paths; they are not the same underlying bug and would not merge into one PR.", "right": "pull_request:45214"}, {"accept": false, "left": "pull_request:44082", "reason": "PatchTSMixer post_init/config cleanup is unrelated to RoPE-kwargs handling in configuration_utils.", "right": "pull_request:45049"}, {"accept": false, "left": "pull_request:42802", "reason": "Lasr flex-attention disablement and Cohere ASR position-embedding device placement fix different model-specific failures.", "right": "pull_request:45214"}, {"accept": false, "left": "pull_request:43759", "reason": "Both touch test expectations, but for different models and different deterministic/XPU issues.", "right": "pull_request:44426"}, {"accept": false, "left": "pull_request:44353", "reason": "Separate model-specific XPU expectation updates; no shared underlying bug or code path.", "right": "pull_request:44426"}, {"accept": false, "left": "pull_request:44426", "reason": "Both are test-only expectation updates, but they 
target different models and different outputs.", "right": "pull_request:44482"}, {"accept": false, "left": "pull_request:44296", "reason": "Auto-docstring generation and a type-checker fix are unrelated maintenance changes.", "right": "pull_request:44502"}, {"accept": false, "left": "pull_request:43341", "reason": "Skipping unsupported tests is unrelated to changing tensor-parallel test process-group backend initialization.", "right": "pull_request:43699"}, {"accept": false, "left": "pull_request:43699", "reason": "One changes tensor-parallel backend setup; the other updates expected logits for a different model.", "right": "pull_request:43910"}, {"accept": false, "left": "pull_request:43699", "reason": "Tensor-parallel backend setup and Moonshine Streaming device-mismatch fixes are unrelated.", "right": "pull_request:43936"}, {"accept": false, "left": "pull_request:43936", "reason": "Moonshine Streaming device placement and Janus generation handling are different model-specific bugs.", "right": "pull_request:45044"}], "summary": "This cluster is a heterogeneous grab-bag of unrelated PRs: model-specific runtime fixes, test expectation churn, config/doc updates, and utility/test-harness changes. 
No duplicate underlying bug or single merged change emerges."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44051|pull_request:44801", "pull_request:45204|pull_request:45214", "pull_request:44082|pull_request:45049", "pull_request:42802|pull_request:45214", "pull_request:43759|pull_request:44426", "pull_request:44353|pull_request:44426", "pull_request:44426|pull_request:44482", "pull_request:44296|pull_request:44502", "pull_request:43341|pull_request:43699", "pull_request:43699|pull_request:43910", "pull_request:43699|pull_request:43936", "pull_request:43936|pull_request:45044"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 16614, "estimated_input_tokens": 8179, "item_count": 17, "node_count": 17, "serialized_chars": 32715, "soft_pair_count": 11}, "cached_at": "2026-04-14T17:08:22Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "614ebcd1e8d349f98848776381029850cd5acae81488bbfb86f84ca83336897f", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:42230", "pull_request:42802", "pull_request:43341", "pull_request:43464", "pull_request:43555", "pull_request:43565", "pull_request:43615", "pull_request:43699", "pull_request:43907", "pull_request:43910", "pull_request:43938", "pull_request:44321", "pull_request:44426", "pull_request:44428", "pull_request:45044", "pull_request:45214", "pull_request:45261"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "pull_request:45044 is the best standalone PR here because it addresses a real runtime bug with an explicit code-path fix and accompanying tests; the others are mostly test skips, 
expectation refreshes, or workflow permission tweaks.", "canonical_issue_reason": null, "canonical_pr_reason": "pull_request:45044 is the strongest representative PR in the set: it fixes a concrete model-generation bug in code, spans source and tests, and has a clear behavior change rather than a test-only adjustment.", "confidence": 0.92, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43699", "reason": "Both touch test infrastructure, but one changes tensor-parallel process-group init and the other updates GLM Image expected outputs. Different models and different failure modes.", "right": "pull_request:43907"}, {"accept": false, "left": "pull_request:42230", "reason": "42230 fixes attention-mask handling for XPU/CUDA in core code; 44426 only updates Qwen2.5-VL test expectations for XPU. Same accelerator context, different bug and scope.", "right": "pull_request:44426"}, {"accept": false, "left": "pull_request:44426", "reason": "Both are XPU expectation updates, but they target different models and fixtures. They are separate test refreshes, not one underlying change.", "right": "pull_request:44428"}, {"accept": false, "left": "pull_request:42802", "reason": "42802 disables flex attention for Lasr due to BlockMask/padding incompatibility; 44321 skips unrelated VoxtralRealtime test cases. Different model families and different issues.", "right": "pull_request:44321"}, {"accept": false, "left": "pull_request:43615", "reason": "These are expected-output updates for different models (MixedInt8 GPT2 vs Jais2). They are not the same bug or patch.", "right": "pull_request:43910"}, {"accept": false, "left": "pull_request:43341", "reason": "43341 skips unsupported GLM Image tests; 45044 fixes Janus generation logic in code. 
One is a test quarantine, the other is a functional bug fix.", "right": "pull_request:45044"}, {"accept": false, "left": "pull_request:43910", "reason": "43910 updates Jais2 test expectations, while 45044 changes Janus generation internals. Different model, different change type, no shared underlying defect.", "right": "pull_request:45044"}, {"accept": false, "left": "pull_request:43341", "reason": "GLM Image test skips are unrelated to Cohere ASR's positional embedding device fix. They affect different subsystems and failure causes.", "right": "pull_request:45214"}, {"accept": false, "left": "pull_request:43555", "reason": "Both are workflow/bot permission edits, but 43555 only adds contents: write to one workflow while 43565 also changes another workflow and deletes the style-bot workflow. Not the same concrete change.", "right": "pull_request:43565"}, {"accept": false, "left": "pull_request:42802", "reason": "42802 disables flex attention for Lasr; 43938 updates Exaone-MoE XPU/CUDA test expectations. Different models and different problem classes.", "right": "pull_request:43938"}, {"accept": false, "left": "pull_request:43464", "reason": "MarkupLM test dtype adjustment and CircleCI comment-script null handling are unrelated maintenance fixes with no shared bug or code path.", "right": "pull_request:45261"}], "summary": "This cluster is a mix of unrelated PRs: several XPU/test expectation updates, a few model-specific runtime fixes, and workflow permission changes. 
None of the suggested soft pairs look like the same underlying bug or mergeable change, so all soft edges should be rejected."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43699|pull_request:43907", "pull_request:42230|pull_request:44426", "pull_request:44426|pull_request:44428", "pull_request:42802|pull_request:44321", "pull_request:43615|pull_request:43910", "pull_request:43341|pull_request:45044", "pull_request:43910|pull_request:45044", "pull_request:43341|pull_request:45214", "pull_request:43555|pull_request:43565", "pull_request:42802|pull_request:43938", "pull_request:43464|pull_request:45261"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 19658, "estimated_input_tokens": 9701, "item_count": 18, "node_count": 18, "serialized_chars": 38802, "soft_pair_count": 10}, "cached_at": "2026-04-14T17:10:03Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "c696047627708af07ce83b5e3db008a61dc2f293b0f93d4cf87b8e2f71db268e", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:39390", "pull_request:42230", "pull_request:42668", "pull_request:43341", "pull_request:43588", "pull_request:43592", "pull_request:43823", "pull_request:43913", "pull_request:43920", "pull_request:43956", "pull_request:44126", "pull_request:44527", "pull_request:44602", "pull_request:44801", "pull_request:45061", "pull_request:45209", "pull_request:45261", "pull_request:45340"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "43956 is the best standalone PR candidate because it is narrowly scoped, mechanically clear, and directly fixes a concrete loading/mapping 
path without mixing in broader refactors or unrelated test/infra changes.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 43956 is the clearest representative of the only tight thematic overlap: it is a focused qwen3-vl-moe weight-mapping fix with a small, concrete code-path change and targeted test update. The other items are unrelated or much broader.", "confidence": 0.91, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43592", "reason": "Different changes: one fixes default config values across several models, the other adds a new MobileLLM model implementation and tests.", "right": "pull_request:43823"}, {"accept": false, "left": "pull_request:43920", "reason": "Unrelated areas: one adjusts test fetching for cache_utils, the other adds a deprecated flash-attn helper in import utils.", "right": "pull_request:45061"}, {"accept": false, "left": "pull_request:43592", "reason": "Different bugs: config default handling vs tokenizer-class hub metadata fixes.", "right": "pull_request:44801"}, {"accept": false, "left": "pull_request:44527", "reason": "No shared underlying change: musicgen integration test dtype fix vs CircleCI workflow null-handling logic.", "right": "pull_request:45261"}, {"accept": false, "left": "pull_request:42668", "reason": "Both touch conversion mapping, but they fix different model families and different mapping issues; not the same concrete change.", "right": "pull_request:45340"}, {"accept": false, "left": "pull_request:44126", "reason": "Related generation/cache themes, but one simplifies input prep while the other removes cache_position across many models; too broad and not one concrete bug.", "right": "pull_request:44602"}, {"accept": false, "left": "pull_request:43588", "reason": "Different model-specific fix vs multi-model config default cleanup; no shared code-path bug.", "right": "pull_request:43592"}, {"accept": false, "left": "pull_request:43341", "reason": "Both alter tests, but for unrelated reasons: 
unsupported glm_image offload tests vs making Nomic BERT tests device-agnostic.", "right": "pull_request:45209"}, {"accept": false, "left": "pull_request:43913", "reason": "Same subsystem, but the fixes are not the same patch: 43913 adds sentinel-based transpose infrastructure, while 43956 changes qwen3_vl_moe mapping patterns/operations directly.", "right": "pull_request:43956"}, {"accept": false, "left": "pull_request:39390", "reason": "Different scopes: a ShieldGemma2 CI test tweak vs an SDPA/xPU attention-mask implementation fix.", "right": "pull_request:42230"}], "summary": "The cluster is mostly heterogeneous: test-only CI fixes, model additions, config cleanup, generation/cache refactors, and unrelated automation changes. There is one near-overlap around qwen3_vl_moe weight mapping, but the two PRs use different fix strategies and don\u2019t look like the same patch."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43592|pull_request:43823", "pull_request:43920|pull_request:45061", "pull_request:43592|pull_request:44801", "pull_request:44527|pull_request:45261", "pull_request:42668|pull_request:45340", "pull_request:44126|pull_request:44602", "pull_request:43588|pull_request:43592", "pull_request:43341|pull_request:45209", "pull_request:43913|pull_request:43956", "pull_request:39390|pull_request:42230"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 21796, "estimated_input_tokens": 10770, "item_count": 18, "node_count": 18, "serialized_chars": 43078, "soft_pair_count": 9}, "cached_at": "2026-04-14T17:10:46Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": 
"2a54af4fde430e9579a43b89c065cb1fa768801b718016a7f4c65175249a077b", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:42802", "pull_request:42848", "pull_request:43324", "pull_request:43400", "pull_request:43579", "pull_request:43710", "pull_request:44033", "pull_request:44157", "pull_request:44414", "pull_request:44600", "pull_request:44647", "pull_request:44657", "pull_request:44801", "pull_request:44833", "pull_request:44834", "pull_request:45032", "pull_request:45212", "pull_request:45340"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 42848 is the best representative if one must be chosen: it is the broadest infrastructure change, touching the attention-mask interface across many models and having the most central scope.", "canonical_issue_reason": null, "canonical_pr_reason": "No single PR clearly subsumes the others; the set is heterogeneous rather than a duplicate cluster.", "confidence": 0.92, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44033", "reason": "Both touch CI failure-commenting tooling, but one changes self-comment CI messaging while the other adds multi-runner failed-test checking; different workflows and goals.", "right": "pull_request:45032"}, {"accept": false, "left": "pull_request:44647", "reason": "Unrelated changes: continuous batching CUDA/device support vs a unit-test BF16 embedding-size fix for Electra.", "right": "pull_request:44657"}, {"accept": false, "left": "pull_request:44414", "reason": "Model-loading tqdm/logging cleanup and tokenizer-class mapping fixes are different code paths and unrelated bugs.", "right": "pull_request:44801"}, {"accept": false, "left": "pull_request:43400", "reason": "A new model addition/docs PR and a docs-only correction PR for another model are not the same change.", "right": "pull_request:43710"}, {"accept": false, "left": "pull_request:42848", "reason": "Both are attention-mask related, but 42848 is a broad interface refactor 
while 44157 fixes a specific packed-input mask bug in Qwen-VL; not mergeable as one PR.", "right": "pull_request:44157"}, {"accept": false, "left": "pull_request:44833", "reason": "Both are modular-example syncs, but they address different example files and different converter/output issues rather than one underlying bug.", "right": "pull_request:44834"}, {"accept": false, "left": "pull_request:42802", "reason": "LASR flex-attention support change and a Minimax M2 XPU test expectation update are unrelated.", "right": "pull_request:43324"}, {"accept": false, "left": "pull_request:43579", "reason": "Both add XPU test support, but for different models and failures; no shared concrete bug or code path.", "right": "pull_request:45212"}, {"accept": false, "left": "pull_request:44600", "reason": "A redundant assignment removal in Paligemma is unrelated to VLM conversion-mapping fixes.", "right": "pull_request:45340"}], "summary": "These PRs are mostly unrelated one-off fixes, test updates, or documentation/model additions. 
The soft pairs share broad subsystems or filenames in a few cases, but they do not look like the same underlying bug or change, so there is no real duplicate cluster here."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44033|pull_request:45032", "pull_request:44647|pull_request:44657", "pull_request:44414|pull_request:44801", "pull_request:43400|pull_request:43710", "pull_request:42848|pull_request:44157", "pull_request:44833|pull_request:44834", "pull_request:42802|pull_request:43324", "pull_request:43579|pull_request:45212", "pull_request:44600|pull_request:45340"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 18890, "estimated_input_tokens": 9317, "item_count": 18, "node_count": 18, "serialized_chars": 37265, "soft_pair_count": 11}, "cached_at": "2026-04-14T17:11:41Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "ce9476d6cec8e3a59915126d6c724609bc333d575f528a5b6d494600d7830c56", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:42230", "pull_request:43178", "pull_request:43614", "pull_request:43759", "pull_request:43913", "pull_request:43953", "pull_request:43989", "pull_request:44125", "pull_request:44353", "pull_request:44647", "pull_request:44657", "pull_request:44733", "pull_request:45033", "pull_request:45044", "pull_request:45061", "pull_request:45204", "pull_request:45209", "pull_request:45284"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 43953 is the best overall PR representative because it directly and narrowly addresses the qwen3_vl_moe checkpoint conversion bug; the rest of the items are unrelated fixes 
across different models, tests, and utilities.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 43953 is the strongest canonical PR candidate: it is the clearest self-contained fix for the qwen3_vl_moe conversion_mapping bug, and the only accepted soft pair centers on that same target file and model.", "confidence": 0.89, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43759", "reason": "Both are XPU-related test expectation tweaks, but they target different models (youtu vs olmo_hybrid) and different test paths; not the same underlying change.", "right": "pull_request:44353"}, {"accept": false, "left": "pull_request:44657", "reason": "Electra test config rounding fix and flash-attn utility/test changes are unrelated subsystems and bugs.", "right": "pull_request:44733"}, {"accept": false, "left": "pull_request:44647", "reason": "One is continuous batching device support, the other is a VideomT device mismatch fix; same broad device theme but different code paths and bugs.", "right": "pull_request:45204"}, {"accept": false, "left": "pull_request:44647", "reason": "Continuous batching runtime changes and Nomic BERT test-device generalization are unrelated.", "right": "pull_request:45209"}, {"accept": false, "left": "pull_request:43614", "reason": "DiffLLaMA contiguous-input handling for XPU and Youtu deterministic expectations are separate issues in different models.", "right": "pull_request:43759"}, {"accept": false, "left": "pull_request:42230", "reason": "Attention-mask unmasking for XPU/CUDA is unrelated to continuous batching graph/device support.", "right": "pull_request:44647"}, {"accept": false, "left": "pull_request:43178", "reason": "A large AMD CI pipeline fix and Qwen2 expectation updates are not the same code-path problem.", "right": "pull_request:45284"}, {"accept": true, "left": "pull_request:43913", "reason": "Both fix qwen3_vl_moe checkpoint conversion in conversion_mapping.py; they address the same underlying 
weight-mapping bug with alternative implementations and could plausibly be merged into one PR.", "right": "pull_request:43953"}, {"accept": false, "left": "pull_request:45033", "reason": "Backwards-compatibility aliasing in image processing and flash-attn version/deprecation support are unrelated.", "right": "pull_request:45061"}, {"accept": false, "left": "pull_request:43989", "reason": "Both touch video_processing_auto.py, but one fixes torchvision-absent lookup behavior while the other changes class lookup/error reporting; not the same concrete bug.", "right": "pull_request:44125"}, {"accept": false, "left": "pull_request:44657", "reason": "An Electra test configuration tweak and a Janus image-generation bug fix are unrelated.", "right": "pull_request:45044"}], "summary": "Mostly heterogeneous PRs; the only plausible duplicate pair is the qwen3_vl_moe conversion-mapping fixes. All other soft pairs are just broad theme matches (device/XPU/CI/tests) without the same concrete code path."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43759|pull_request:44353", "pull_request:44657|pull_request:44733", "pull_request:44647|pull_request:45204", "pull_request:44647|pull_request:45209", "pull_request:43614|pull_request:43759", "pull_request:42230|pull_request:44647", "pull_request:43178|pull_request:45284", "pull_request:43913|pull_request:43953", "pull_request:45033|pull_request:45061", "pull_request:43989|pull_request:44125", "pull_request:44657|pull_request:45044"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 18250, "estimated_input_tokens": 8997, "item_count": 17, "node_count": 17, "serialized_chars": 35985, "soft_pair_count": 10}, "cached_at": "2026-04-14T17:12:11Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": 
"0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "d7bcebd18f82f45734650611483974090c3b2c8d29be6d4b373ef806185a2909", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:42668", "pull_request:43339", "pull_request:43579", "pull_request:43712", "pull_request:43758", "pull_request:43759", "pull_request:43913", "pull_request:43920", "pull_request:43936", "pull_request:44426", "pull_request:44428", "pull_request:44482", "pull_request:44653", "pull_request:44801", "pull_request:44827", "pull_request:45061", "pull_request:45433"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "44827 is the strongest anchor only because it is the most substantive functional code change with broad runtime impact; the cluster is still too heterogeneous for a true canonical PR.", "canonical_issue_reason": null, "canonical_pr_reason": "No single canonical PR: the set spans unrelated test updates, auto-loader tweaks, typing cleanup, and separate model/integration fixes.", "confidence": 0.93, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43936", "reason": "Different fixes: moonshine_streaming device/forward behavior vs AutoImageProcessor local implementation detection.", "right": "pull_request:44653"}, {"accept": false, "left": "pull_request:43758", "reason": "Both stabilize XPU outputs, but for different models and different test paths; not the same bug.", "right": "pull_request:44426"}, {"accept": false, "left": "pull_request:43759", "reason": "Separate model test expectation updates for youtu and higgs_audio_v2; no shared concrete failure.", "right": "pull_request:44482"}, {"accept": false, "left": "pull_request:43712", "reason": "Modeling typing cleanup is unrelated to flash-attn version/deprecation compatibility.", "right": "pull_request:45061"}, {"accept": false, "left": "pull_request:43712", "reason": "typing/explanations in 
modeling_utils is unrelated to tests_fetcher coverage for cache_utils.", "right": "pull_request:43920"}, {"accept": false, "left": "pull_request:42668", "reason": "Both touch auto/tokenizer-related code, but they fix different model mappings and different loader behavior.", "right": "pull_request:44801"}, {"accept": false, "left": "pull_request:43339", "reason": "Both are XPU test updates, but for different models and different expected-output fixes.", "right": "pull_request:43579"}, {"accept": false, "left": "pull_request:43759", "reason": "Different audio/LLM test expectation changes for unrelated models; not a single underlying issue.", "right": "pull_request:44428"}, {"accept": false, "left": "pull_request:44827", "reason": "Both involve MoE-related files, but 44827 fixes Mistral4/MoE behavior while 45433 adds SonicMoe integration; distinct code paths.", "right": "pull_request:45433"}, {"accept": false, "left": "pull_request:43712", "reason": "General typing cleanup and qwen3_vl_moe checkpoint conversion changes are unrelated.", "right": "pull_request:43913"}], "summary": "These PRs are mostly unrelated maintenance or model-specific fixes; the soft pairs share broad themes like XPU tests or MoE/integration code, but not the same underlying bug or change. 
None should be merged as duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43936|pull_request:44653", "pull_request:43758|pull_request:44426", "pull_request:43759|pull_request:44482", "pull_request:43712|pull_request:45061", "pull_request:43712|pull_request:43920", "pull_request:42668|pull_request:44801", "pull_request:43339|pull_request:43579", "pull_request:43759|pull_request:44428", "pull_request:44827|pull_request:45433", "pull_request:43712|pull_request:43913"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 22664, "estimated_input_tokens": 11204, "item_count": 18, "node_count": 18, "serialized_chars": 44813, "soft_pair_count": 10}, "cached_at": "2026-04-14T17:13:16Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "e0fc231359d6482742e1aaa4e55484d0955af03471fd6c386fb12ee9d41c4067", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:42668", "pull_request:43400", "pull_request:43710", "pull_request:43758", "pull_request:43759", "pull_request:43787", "pull_request:43930", "pull_request:44125", "pull_request:44286", "pull_request:44490", "pull_request:44601", "pull_request:44653", "pull_request:44662", "pull_request:44808", "pull_request:44828", "pull_request:45209", "pull_request:45212", "pull_request:45336"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "Same as canonical: it is the most complete feature-addition PR with the widest code surface and the most review activity, making it the best representative target among these unrelated PRs.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 44662 is the 
broadest, most substantive standalone change here: a full PenguinVL implementation spanning docs, auto registries, modeling, processing, and tests. If a single PR must anchor the cluster, this is the strongest representative.", "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44601", "reason": "Different changes: pipeline-parallel loading support vs. PP-OCRv5 model support. Same distributed/vision theme only at a high level, not the same bug or patch.", "right": "pull_request:44808"}, {"accept": false, "left": "pull_request:43710", "reason": "Docs-only cleanup for GLM-OCR/EomT-DINOv3 vs. a new PenguinVL model implementation. No shared underlying change.", "right": "pull_request:44662"}, {"accept": false, "left": "pull_request:44490", "reason": "EuroBERT model-parallelism masking fix vs. AutoImageProcessor local-implementation detection. Different code paths and failure modes.", "right": "pull_request:44653"}, {"accept": false, "left": "pull_request:43758", "reason": "Both stabilize tests, but for different models and different expected outputs. This is superficial test-hardening similarity, not duplicate work.", "right": "pull_request:45209"}, {"accept": false, "left": "pull_request:42668", "reason": "AudioFlamingo3 processor registration cleanup vs. video processor loading error handling. Different auto-registry subsystems and unrelated fixes.", "right": "pull_request:44125"}, {"accept": false, "left": "pull_request:43758", "reason": "Both add XPU-related test support, but one is EomT-DINOv3 determinism and the other is MusicFlamingo fixture support. Not the same concrete change.", "right": "pull_request:45212"}, {"accept": false, "left": "pull_request:43787", "reason": "PT 2.4+ cleanup/removal of legacy checks vs. autocast API usage improvement. 
Shared compatibility motive, but not the same code-path problem.", "right": "pull_request:43930"}, {"accept": false, "left": "pull_request:44828", "reason": "Removing cache_position in legacy models vs. Gemma4 shared-weight removal/loading behavior. Distinct model internals and unrelated fixes.", "right": "pull_request:45336"}, {"accept": false, "left": "pull_request:43400", "reason": "VibeVoice model addition vs. Fouroversix future-model/quantization support. Both are model enablement work, but not the same implementation or bug.", "right": "pull_request:44286"}, {"accept": false, "left": "pull_request:43758", "reason": "Both add deterministic-for-XPU test guards, but they apply to different model tests and different assertions. Not mergeable as one PR.", "right": "pull_request:43759"}], "summary": "A mixed bag of unrelated PRs: model additions (PenguinVL, PP-OCRv5, VibeVoice), infra/auto-loader fixes, docs tweaks, and device/test stability patches. The apparent overlaps are only superficial (shared XPU/tests, auto classes, or model-support wording), not the same underlying change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44601|pull_request:44808", "pull_request:43710|pull_request:44662", "pull_request:44490|pull_request:44653", "pull_request:43758|pull_request:45209", "pull_request:42668|pull_request:44125", "pull_request:43758|pull_request:45212", "pull_request:43787|pull_request:43930", "pull_request:44828|pull_request:45336", "pull_request:43400|pull_request:44286", "pull_request:43758|pull_request:43759"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 19274, "estimated_input_tokens": 9509, "item_count": 18, "node_count": 18, "serialized_chars": 38034, "soft_pair_count": 10}, "cached_at": "2026-04-14T17:13:50Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", 
"evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "9c1ba8d006658dcdf1546f73e1c84b9e699d72b5fa420ed582e8a8af8717390c", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43324", "pull_request:43500", "pull_request:43553", "pull_request:43554", "pull_request:43555", "pull_request:43565", "pull_request:43579", "pull_request:43710", "pull_request:43712", "pull_request:43913", "pull_request:43916", "pull_request:44236", "pull_request:44330", "pull_request:44414", "pull_request:44433", "pull_request:44497", "pull_request:44544", "pull_request:45044"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 44433 is the best global representative because it is a clear, self-contained defect fix with explicit behavior change and tests, unlike the other items that are mostly narrow maintenance or unrelated changes.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 44433 is the strongest standalone bug fix in the set: it addresses a concrete runtime validation path, has broad model/test coverage, and is more substantive than the workflow/docs-only PRs.", "confidence": 0.42, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43553", "reason": "Related workflow area, but not the same concrete change: 43553 adds the style bot workflow, while 43565 later revises/deletes workflow files and adds separate permissions. They are not mergeable as one duplicate fix.", "right": "pull_request:43565"}, {"accept": false, "left": "pull_request:43710", "reason": "Different subsystems and problems: docs/model page updates vs removing cache_position in generation code across models. 
No shared underlying bug or change.", "right": "pull_request:44330"}, {"accept": false, "left": "pull_request:44497", "reason": "Unrelated fixes: tie-weights behavior in model loading versus ANSI styling in loading reports. Different code paths and goals.", "right": "pull_request:44544"}, {"accept": false, "left": "pull_request:43324", "reason": "Both touch accelerator test expectations, but they are for different models and different failing cases. Same broad theme, not the same bug.", "right": "pull_request:43579"}, {"accept": false, "left": "pull_request:44236", "reason": "One fixes zero3 init/config dependencies; the other reduces tqdm verbosity during model loading. Different functionality and no duplicate concrete issue.", "right": "pull_request:44414"}, {"accept": false, "left": "pull_request:43554", "reason": "43554 only changes the style bot command type, while 43565 changes workflow permissions and deletes/reworks workflow files. Not the same underlying fix.", "right": "pull_request:43565"}, {"accept": false, "left": "pull_request:43913", "reason": "Different problems: Qwen3-VL-MoE weight mapping/sentinel transpose logic versus tie-weight symmetry in model saving/loading. Not the same code-path issue.", "right": "pull_request:44497"}, {"accept": false, "left": "pull_request:43500", "reason": "43500 is a test-file placeholder/dummy edit, while 43555 changes workflow permissions. No shared underlying bug or change.", "right": "pull_request:43555"}, {"accept": false, "left": "pull_request:44433", "reason": "Both concern generation-related bugs, but they affect different models and different failure modes: missing mm_token_type_ids versus Janus image-generation config handling.", "right": "pull_request:45044"}, {"accept": false, "left": "pull_request:43712", "reason": "One is a typing/documentation cleanup for model properties; the other renames harmonizes inputs_embeds usage across generation and model code. 
Related area, but not the same change.", "right": "pull_request:43916"}], "summary": "The cluster is heterogeneous: most items are unrelated PRs spanning workflows, docs, model fixes, and utility refactors. None of the soft pairs look like true duplicates or a single mergeable change, so all soft edges should be rejected."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43553|pull_request:43565", "pull_request:43710|pull_request:44330", "pull_request:44497|pull_request:44544", "pull_request:43324|pull_request:43579", "pull_request:44236|pull_request:44414", "pull_request:43554|pull_request:43565", "pull_request:43913|pull_request:44497", "pull_request:43500|pull_request:43555", "pull_request:44433|pull_request:45044", "pull_request:43712|pull_request:43916"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 19560, "estimated_input_tokens": 9652, "item_count": 17, "node_count": 17, "serialized_chars": 38606, "soft_pair_count": 11}, "cached_at": "2026-04-14T17:14:22Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "753147a668dacc0d84c87f442b03b9a77d521bc0ebb42099e0ef5f3557595153", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:42186", "pull_request:43341", "pull_request:43486", "pull_request:43710", "pull_request:43778", "pull_request:43910", "pull_request:43913", "pull_request:43936", "pull_request:44236", "pull_request:44353", "pull_request:44497", "pull_request:44828", "pull_request:45061", "pull_request:45204", "pull_request:45336", "pull_request:45340", "pull_request:45407"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 
45340 is the most substantive and broadly representative code-path fix among the candidates, but the set is not a real duplicate cluster.", "canonical_issue_reason": null, "canonical_pr_reason": "No true duplicate cluster emerges here; if one PR must represent the set, PR 45340 is the strongest general code-fix candidate because it changes core conversion mapping/loading behavior rather than just tests or docs.", "confidence": 0.92, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44353", "reason": "OLMo hybrid test expectations vs trainer wrapping logic for 4-bit quantized models; different code paths and different problems.", "right": "pull_request:45407"}, {"accept": false, "left": "pull_request:43486", "reason": "Video batching bug fix vs tie-weights behavior in modeling utils; unrelated functionality.", "right": "pull_request:44497"}, {"accept": false, "left": "pull_request:43778", "reason": "Mamba initialization changes vs zero3 init/config dependency fix; not the same bug or change.", "right": "pull_request:44236"}, {"accept": false, "left": "pull_request:42186", "reason": "Attention mask slicing cleanup across models vs Gemma4 shared-weight loading change; unrelated issues.", "right": "pull_request:45336"}, {"accept": false, "left": "pull_request:43913", "reason": "Qwen3-VL MoE weight mapping/sentinel fix vs flash-attn compatibility/deprecation helper; no shared code-path problem.", "right": "pull_request:45061"}, {"accept": false, "left": "pull_request:44236", "reason": "DeepSpeed/zero3 init config fix vs VLM conversion mapping updates; separate subsystems and fixes.", "right": "pull_request:45340"}, {"accept": false, "left": "pull_request:43341", "reason": "Skipped unsupported GLM-Image tests vs trainer 4-bit DataParallel gating; test triage and training wrapper behavior are unrelated.", "right": "pull_request:45407"}, {"accept": false, "left": "pull_request:43910", "reason": "Jais2 expected-output test update vs trainer quantized-model 
wrapping; unrelated changes.", "right": "pull_request:45407"}, {"accept": false, "left": "pull_request:43936", "reason": "Moonshine streaming device-mismatch fix vs trainer 4-bit DataParallel behavior; different models and bug classes.", "right": "pull_request:45407"}, {"accept": false, "left": "pull_request:45204", "reason": "VideoMT device mismatch fix vs trainer 4-bit quantized model wrapping; not the same underlying issue.", "right": "pull_request:45407"}, {"accept": false, "left": "pull_request:43710", "reason": "Docs for GLM-OCR/EomT-DINOv3 vs removing cache_position in many models; no overlap in bug or change type.", "right": "pull_request:44828"}], "summary": "These pull requests are largely unrelated and span separate models, tests, conversion logic, docs, and trainer behavior. None of the proposed soft matches look like the same underlying bug or change, so all soft edges should be rejected."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44353|pull_request:45407", "pull_request:43486|pull_request:44497", "pull_request:43778|pull_request:44236", "pull_request:42186|pull_request:45336", "pull_request:43913|pull_request:45061", "pull_request:44236|pull_request:45340", "pull_request:43341|pull_request:45407", "pull_request:43910|pull_request:45407", "pull_request:43936|pull_request:45407", "pull_request:45204|pull_request:45407", "pull_request:43710|pull_request:44828"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 20020, "estimated_input_tokens": 9882, "item_count": 17, "node_count": 17, "serialized_chars": 39525, "soft_pair_count": 9}, "cached_at": "2026-04-14T17:16:38Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": 
"gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "226599c06835d9e9bfd0be11e580fc3d8b67bf985a7f9131853fea86cd532fe0", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:42186", "pull_request:43030", "pull_request:43424", "pull_request:43445", "pull_request:43579", "pull_request:43712", "pull_request:43942", "pull_request:43995", "pull_request:44037", "pull_request:44051", "pull_request:44235", "pull_request:44300", "pull_request:44330", "pull_request:44334", "pull_request:44414", "pull_request:45190", "pull_request:45214"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "No PR is a good global representative here because the cluster mixes unrelated changes; the closest thing to a broad representative is still too arbitrary to serve as canonical.", "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.95, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43030", "reason": "Both touch model forward/attention plumbing, but 43030 is a cross-model mask-interface refactor while 43995 is a Falcon output-collection refactor; different code paths and change goals.", "right": "pull_request:43995"}, {"accept": false, "left": "pull_request:44037", "reason": "44037 fixes MoE weight mapping and transpose checks, while 45190 is a CLI typing/AST cleanup; unrelated functionality.", "right": "pull_request:45190"}, {"accept": false, "left": "pull_request:43445", "reason": "Both involve conversion mapping, but 43445 fixes specific MoE router/weight names and 44300 makes conversion recursive across many models; not the same concrete bug.", "right": "pull_request:44300"}, {"accept": false, "left": "pull_request:44051", "reason": "Type-hint correction for Gemma3n forward output vs tokenizer-class registry update for Fuyu; no shared underlying change.", "right": "pull_request:44235"}, {"accept": false, "left": "pull_request:43712", "reason": "One is typing/explanation cleanup for model 
properties, the other reduces tqdm verbosity in loading; unrelated.", "right": "pull_request:44414"}, {"accept": false, "left": "pull_request:43579", "reason": "43579 changes XPU test expectations for Solar Open, while 45214 fixes a device placement bug in Cohere ASR positional embeddings; different models and bugs.", "right": "pull_request:45214"}, {"accept": false, "left": "pull_request:42186", "reason": "Both concern attention/cache-position behavior, but 42186 removes mask slicing in eager attention while 44330 removes cache_position usage in several models; too broad and not the same fix.", "right": "pull_request:44330"}, {"accept": false, "left": "pull_request:44334", "reason": "Both modify add_new_model_like.py, but 44334 fixes a cookiecutter/tokenizer-class initialization issue and 45190 fixes type handling in the CLI parser; distinct bugs.", "right": "pull_request:45190"}, {"accept": false, "left": "pull_request:43424", "reason": "These are both test additions, but for different subsystems and failures: executorch dynamic-shape export vs Bark input_embeds behavior; not a duplicate change.", "right": "pull_request:43942"}], "summary": "This cluster is heterogeneous: every soft pair spans different bug fixes, refactors, or test/doc updates, so there is no credible duplicate core and no single canonical PR/issue stands out."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43030|pull_request:43995", "pull_request:44037|pull_request:45190", "pull_request:43445|pull_request:44300", "pull_request:44051|pull_request:44235", "pull_request:43712|pull_request:44414", "pull_request:43579|pull_request:45214", "pull_request:42186|pull_request:44330", "pull_request:44334|pull_request:45190", "pull_request:43424|pull_request:43942"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 22578, "estimated_input_tokens": 11161, 
"item_count": 18, "node_count": 18, "serialized_chars": 44641, "soft_pair_count": 11}, "cached_at": "2026-04-14T17:17:19Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "5993e442b7bf915fa1b4486fc020e4fd564e0fc9585e3527f25b87d63510892a", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:42823", "pull_request:43098", "pull_request:43178", "pull_request:43247", "pull_request:43274", "pull_request:43345", "pull_request:43578", "pull_request:43690", "pull_request:43919", "pull_request:43942", "pull_request:43953", "pull_request:44033", "pull_request:44037", "pull_request:44082", "pull_request:44601", "pull_request:44833", "pull_request:45190", "pull_request:45340"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 44037 is the best representative of the cluster\u2019s only meaningful overlap (Qwen3VL-MoE conversion mapping), but the overall set is not a true duplicate cluster.", "canonical_issue_reason": null, "canonical_pr_reason": "No single PR cleanly represents the cluster because the items are largely unrelated. If forced to pick one, PR 44037 is the strongest standalone candidate since it is the most substantive and self-contained of the only partially related pair.", "confidence": 0.91, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43690", "reason": "Different fixes: notebook/repl custom-model crash handling vs regenerating modular example converters. 
No shared bug or change.", "right": "pull_request:44833"}, {"accept": false, "left": "pull_request:43098", "reason": "PP-DocLayoutV3 model support is unrelated to native pipeline-parallel loading support.", "right": "pull_request:44601"}, {"accept": false, "left": "pull_request:43247", "reason": "PP-OCRv5 mobile detection model support is unrelated to pipeline-parallel loading changes.", "right": "pull_request:44601"}, {"accept": false, "left": "pull_request:43274", "reason": "PP-OCRv5 server detection model support is unrelated to pipeline-parallel loading changes.", "right": "pull_request:44601"}, {"accept": false, "left": "pull_request:43345", "reason": "PP-LCNet model support is unrelated to native pipeline-parallel loading support.", "right": "pull_request:44601"}, {"accept": false, "left": "pull_request:42823", "reason": "LASR tokenizer mapping fix and Youtu test repo alignment are different model-specific maintenance changes.", "right": "pull_request:43578"}, {"accept": false, "left": "pull_request:43942", "reason": "Bark test cleanup and modular converter regeneration are unrelated changes with no shared underlying bug.", "right": "pull_request:44833"}, {"accept": false, "left": "pull_request:45190", "reason": "CLI typing fixes and conversion-mapping updates for VLMs are different subsystems and different bugs.", "right": "pull_request:45340"}, {"accept": false, "left": "pull_request:43178", "reason": "Pipeline CI stabilization and self-comment workflow messaging are unrelated CI chores, not the same fix.", "right": "pull_request:44033"}, {"accept": false, "left": "pull_request:43919", "reason": "Gradient sync behavior in Trainer and PatchTSMixer post_init handling are unrelated training/model init fixes.", "right": "pull_request:44082"}, {"accept": false, "left": "pull_request:43953", "reason": "Both touch qwen3_vl_moe conversion mapping, but 44037 also adds dim-check/transpose infrastructure and tests; this is not the same concrete change and should not 
be merged as a duplicate.", "right": "pull_request:44037"}], "summary": "This cluster is mostly a grab-bag of unrelated pull requests. The only near-overlap is the two Qwen3VL-MoE conversion-mapping PRs, but one is a narrow mapping-table fix and the other adds a broader transpose/dim-check conversion change with tests, so I would not merge them as duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43690|pull_request:44833", "pull_request:43098|pull_request:44601", "pull_request:43247|pull_request:44601", "pull_request:43274|pull_request:44601", "pull_request:43345|pull_request:44601", "pull_request:42823|pull_request:43578", "pull_request:43942|pull_request:44833", "pull_request:45190|pull_request:45340", "pull_request:43178|pull_request:44033", "pull_request:43919|pull_request:44082", "pull_request:43953|pull_request:44037"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 21058, "estimated_input_tokens": 10401, "item_count": 18, "node_count": 18, "serialized_chars": 41601, "soft_pair_count": 10}, "cached_at": "2026-04-14T17:17:52Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "117f71be3af6235ab7c50d3b4bdbbd5a68e3846f2f3c98ca25d0ef19b7518ee3", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:40755", "pull_request:41763", "pull_request:43030", "pull_request:43326", "pull_request:43445", "pull_request:43486", "pull_request:43683", "pull_request:43851", "pull_request:43913", "pull_request:44126", "pull_request:44236", "pull_request:44433", "pull_request:44544", "pull_request:44770", "pull_request:44943", "pull_request:45190", "pull_request:45340", 
"pull_request:45429"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "No strong global best PR; each item is a separate change, so any choice would be arbitrary rather than representative of a duplicate cluster.", "canonical_issue_reason": null, "canonical_pr_reason": "No canonical PR: the cluster is heterogeneous and the candidate PRs do not share a single underlying change or code path.", "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43683", "reason": "Different changes: forced fp32 upcasting in loading vs simplifying generation input preparation.", "right": "pull_request:44126"}, {"accept": false, "left": "pull_request:44236", "reason": "Unrelated scope: DeepSpeed zero3 init config fix vs CLI type-handling cleanup.", "right": "pull_request:45190"}, {"accept": false, "left": "pull_request:43030", "reason": "ModernBERT attention-interface refactor is not the same change as MoE router mapping fixes.", "right": "pull_request:43445"}, {"accept": false, "left": "pull_request:44770", "reason": "Different bugs: strict-config handling across several models vs config typing and rope validation updates.", "right": "pull_request:44943"}, {"accept": false, "left": "pull_request:43851", "reason": "Both touch workflows, but they fix different workflow files and different CI behavior.", "right": "pull_request:45429"}, {"accept": false, "left": "pull_request:43913", "reason": "Qwen3-VL-MoE weight-mapping/sentinel logic is unrelated to loading-report ANSI styling.", "right": "pull_request:44544"}, {"accept": false, "left": "pull_request:43486", "reason": "Video batching fix and Qwen3-VL-MoE conversion mapping are entirely different code paths.", "right": "pull_request:43913"}, {"accept": false, "left": "pull_request:44433", "reason": "Multimodal token-type validation is a runtime model bug; conversion mappings for VLMs are a separate conversion concern.", "right": "pull_request:45340"}, {"accept": false, "left": 
"pull_request:40755", "reason": "TimesFM covariate support and TimesFM2.5 model introduction are related only by model family, not the same concrete change.", "right": "pull_request:41763"}, {"accept": false, "left": "pull_request:43326", "reason": "MXFP4 dequantization fix and loading-report formatting fix do not overlap in functionality or code path.", "right": "pull_request:44544"}], "summary": "These PRs are not duplicates of each other; they span unrelated model features, bug fixes, docs, workflows, and utility changes across different subsystems. No single canonical PR emerges from this mixed set."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43683|pull_request:44126", "pull_request:44236|pull_request:45190", "pull_request:43030|pull_request:43445", "pull_request:44770|pull_request:44943", "pull_request:43851|pull_request:45429", "pull_request:43913|pull_request:44544", "pull_request:43486|pull_request:43913", "pull_request:44433|pull_request:45340", "pull_request:40755|pull_request:41763", "pull_request:43326|pull_request:44544"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 20500, "estimated_input_tokens": 10122, "item_count": 18, "node_count": 18, "serialized_chars": 40485, "soft_pair_count": 11}, "cached_at": "2026-04-14T17:18:36Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "0d22333d95ee4f98d3e172685539d990afe5a2982ae5ef3ad58af439ed67329b", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:41722", "pull_request:42028", "pull_request:42186", "pull_request:43326", "pull_request:43486", "pull_request:43683", "pull_request:43823", 
"pull_request:43916", "pull_request:43920", "pull_request:43953", "pull_request:44037", "pull_request:44126", "pull_request:44251", "pull_request:44502", "pull_request:44544", "pull_request:45061", "pull_request:45078", "pull_request:45340"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "#44251 is the best PR to keep as a representative artifact if forced: it is merged, broadly scoped, and well-reviewed, while the rest of the set are either unrelated or narrowly targeted fixes.", "canonical_issue_reason": null, "canonical_pr_reason": "No single canonical PR fits the cluster because the PRs are not duplicates. If a representative PR must be picked, #44251 is the strongest standalone candidate: a merged, substantial feature addition with extensive review, unlike the narrow or unrelated fixes.", "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43683", "reason": "Different problems: forced fp32 upcasting vs a type-checker guard in import_utils. 
No shared code-path or concrete bug.", "right": "pull_request:44502"}, {"accept": false, "left": "pull_request:43683", "reason": "Unrelated fixes: fp32 upcasting/load config handling versus ANSI styling in loading reports.", "right": "pull_request:44544"}, {"accept": false, "left": "pull_request:43326", "reason": "Completely different subsystems and bugs: MXFP4 dequantization versus video batching for 5D arrays.", "right": "pull_request:43486"}, {"accept": false, "left": "pull_request:42186", "reason": "Eager attention mask slicing fix has nothing to do with a torch MLU type-check guard.", "right": "pull_request:44502"}, {"accept": false, "left": "pull_request:43916", "reason": "Input-embed naming and generation slicing changes are unrelated to test-trigger configuration for cache_utils.", "right": "pull_request:43920"}, {"accept": false, "left": "pull_request:43953", "reason": "Both touch conversion mapping, but 43953 is a specific qwen3_vl_moe mapping fix while 45340 is a broader VLM conversion-mapping cleanup; not the same concrete change.", "right": "pull_request:45340"}, {"accept": false, "left": "pull_request:44037", "reason": "qwen3_vl_moe weight-mapping logic and a flash-attn compatibility/deprecation PR are unrelated.", "right": "pull_request:45061"}, {"accept": false, "left": "pull_request:41722", "reason": "A Tamil README docs addition is unrelated to adding MiniMax-M2 model support.", "right": "pull_request:42028"}, {"accept": false, "left": "pull_request:43486", "reason": "Video batching for 5D arrays is unrelated to simplifying generation input preparation.", "right": "pull_request:44126"}, {"accept": false, "left": "pull_request:45078", "reason": "Tokenization auto-conversion behavior and VLM conversion-mapping fixes are different issues with no shared concrete code-path.", "right": "pull_request:45340"}, {"accept": false, "left": "pull_request:43823", "reason": "Both add model support, but for different models and code paths; not a duplicate 
change.", "right": "pull_request:44251"}], "summary": "This set is mostly heterogeneous false-positive pairings: the PRs cover unrelated model additions, loading fixes, docs, and utility changes rather than the same underlying bug or change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43683|pull_request:44502", "pull_request:43683|pull_request:44544", "pull_request:43326|pull_request:43486", "pull_request:42186|pull_request:44502", "pull_request:43916|pull_request:43920", "pull_request:43953|pull_request:45340", "pull_request:44037|pull_request:45061", "pull_request:41722|pull_request:42028", "pull_request:43486|pull_request:44126", "pull_request:45078|pull_request:45340", "pull_request:43823|pull_request:44251"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 19840, "estimated_input_tokens": 9792, "item_count": 17, "node_count": 17, "serialized_chars": 39166, "soft_pair_count": 9}, "cached_at": "2026-04-14T17:19:15Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "bf86a09f221a633bfba34620862ea3901afb9e5458e5493c09245ba6ce8d82de", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:40742", "pull_request:42823", "pull_request:42993", "pull_request:43445", "pull_request:43578", "pull_request:43740", "pull_request:43821", "pull_request:43902", "pull_request:43916", "pull_request:43919", "pull_request:43942", "pull_request:43946", "pull_request:44040", "pull_request:44235", "pull_request:45061", "pull_request:45164", "pull_request:45212"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "If forced to pick one representative PR, 44040 is 
the strongest self-contained bugfix with tests, but it is still unrelated to most of the other items and should not be treated as a duplicate anchor.", "canonical_issue_reason": null, "canonical_pr_reason": "No canonical PR: the items do not form a duplicate set, so there is no single change that represents the whole cluster.", "confidence": 0.98, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43740", "reason": "Qwen2 PyTorch-version cleanup vs documentation stack updates; same broad release cleanup theme, but not the same bug or code path.", "right": "pull_request:43902"}, {"accept": false, "left": "pull_request:43578", "reason": "Youtu test repo alignment vs adding/fixing Fuyu tokenizer class mapping; different models and different changes.", "right": "pull_request:44235"}, {"accept": false, "left": "pull_request:42823", "reason": "LASR tokenizer/test update vs Bark test fix; both are tests/model support, but they address different artifacts and failures.", "right": "pull_request:43942"}, {"accept": false, "left": "pull_request:40742", "reason": "Assume-torch import restructuring in integrations vs Dockerfile package source changes; unrelated fixes.", "right": "pull_request:43946"}, {"accept": false, "left": "pull_request:42823", "reason": "LASR integration/test work vs MoE router/conversion mapping fixes; no shared underlying bug.", "right": "pull_request:43445"}, {"accept": false, "left": "pull_request:42993", "reason": "Both mention Intel XPU, but one adds benchmark_v2 platform support and the other adjusts MusicFlamingo tests; too broad to merge as one PR.", "right": "pull_request:45212"}, {"accept": false, "left": "pull_request:43919", "reason": "Gradient synchronization fix in trainer vs Gemma3n get_audio_features fix; different code paths and failure modes.", "right": "pull_request:44040"}, {"accept": false, "left": "pull_request:43821", "reason": "PEFT quantization typo fix vs GenerationMixin cache NoneType fix; unrelated subsystems and 
bugs.", "right": "pull_request:45164"}, {"accept": false, "left": "pull_request:43916", "reason": "Naming harmonization (`input_embeds`/`inputs_embeds`) across docs/code vs Flash Attention BC/deprecation helper; not the same change.", "right": "pull_request:45061"}], "summary": "This cluster is a false-positive mix of unrelated PRs: docs cleanup, tokenizer/model test fixes, generation/trainer bugfixes, XPU enablement, and MoE/router changes. The soft pairs only share broad themes, not the same concrete bug or change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43740|pull_request:43902", "pull_request:43578|pull_request:44235", "pull_request:42823|pull_request:43942", "pull_request:40742|pull_request:43946", "pull_request:42823|pull_request:43445", "pull_request:42993|pull_request:45212", "pull_request:43919|pull_request:44040", "pull_request:43821|pull_request:45164", "pull_request:43916|pull_request:45061"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 21112, "estimated_input_tokens": 10428, "item_count": 17, "node_count": 17, "serialized_chars": 41711, "soft_pair_count": 11}, "cached_at": "2026-04-14T17:19:56Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "389a75442fd799375a715611e01be4e4974155f8b9b2fb7f9335d64fb4bc0794", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:41549", "pull_request:43445", "pull_request:43486", "pull_request:43683", "pull_request:43781", "pull_request:43916", "pull_request:43953", "pull_request:43956", "pull_request:44037", "pull_request:44125", "pull_request:44293", "pull_request:44330", "pull_request:44414", 
"pull_request:44433", "pull_request:44502", "pull_request:44544", "pull_request:45155"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 44037 is the best representative only in the narrow sense that it is a concrete merged code fix with a localized code-path change and the clearest linkage to one of the few semantically similar PRs in the set, but it still does not subsume the cluster.", "canonical_issue_reason": null, "canonical_pr_reason": "No single canonical PR cleanly represents the cluster; the items are mostly unrelated one-off fixes, so any canonical choice would be arbitrary.", "confidence": 0.95, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44414", "reason": "Different bugs: one reduces tqdm verbosity in model loading, the other changes PEFT adapter loading with tensor parallelism.", "right": "pull_request:45155"}, {"accept": false, "left": "pull_request:41549", "reason": "Both are model-internals PRs, but they fix unrelated areas: DETR refactoring vs removing cache_position from many models.", "right": "pull_request:44330"}, {"accept": false, "left": "pull_request:44293", "reason": "Unrelated utility changes: export/error cleanup versus loading progress verbosity.", "right": "pull_request:44414"}, {"accept": false, "left": "pull_request:43781", "reason": "Different code paths and bugs: local adapter/base-model resolution vs Qwen3-VL-MoE conversion mapping.", "right": "pull_request:43953"}, {"accept": false, "left": "pull_request:44125", "reason": "Both are loading/validation fixes, but one is video processor class resolution and the other is multimodal token-type validation; not the same bug.", "right": "pull_request:44433"}, {"accept": false, "left": "pull_request:43445", "reason": "Both touch conversion mapping, but they target different model families and different weight-renaming problems; too broad to merge as duplicates.", "right": "pull_request:43956"}, {"accept": false, "left": 
"pull_request:43445", "reason": "Same broad subsystem, but the fixes are for different MoE models and different mapping logic, so they are not the same underlying change.", "right": "pull_request:43953"}, {"accept": false, "left": "pull_request:43916", "reason": "Unrelated fixes: generation/embed terminology cleanup vs ANSI styling in loading reports.", "right": "pull_request:44544"}, {"accept": false, "left": "pull_request:43486", "reason": "Different problems in different utilities: batched video handling vs generation input-embeds terminology/slicing.", "right": "pull_request:43916"}, {"accept": false, "left": "pull_request:44037", "reason": "No real overlap beyond being utility/modeling PRs; qwen3_vl_moe weight mapping and a torch MLU type-check fix are unrelated.", "right": "pull_request:44502"}, {"accept": false, "left": "pull_request:43683", "reason": "Both involve loading/conversion internals, but one fixes forced fp32 upcasting and the other fixes qwen3_vl_moe transpose/mapping behavior; different bugs.", "right": "pull_request:44037"}], "summary": "This cluster is heterogeneous: it mixes unrelated merged PRs touching model loading, PEFT, generation docs, video utils, MoE conversion mappings, and a few model-specific fixes. 
The soft edges mostly connect PRs that only share broad infrastructure or file overlap, not the same underlying bug/change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44414|pull_request:45155", "pull_request:41549|pull_request:44330", "pull_request:44293|pull_request:44414", "pull_request:43781|pull_request:43953", "pull_request:44125|pull_request:44433", "pull_request:43445|pull_request:43956", "pull_request:43445|pull_request:43953", "pull_request:43916|pull_request:44544", "pull_request:43486|pull_request:43916", "pull_request:44037|pull_request:44502", "pull_request:43683|pull_request:44037"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 20346, "estimated_input_tokens": 10045, "item_count": 17, "node_count": 17, "serialized_chars": 40180, "soft_pair_count": 14}, "cached_at": "2026-04-14T17:23:37Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "42a4bbec7c655d11e7d08b0e20d2b7f33b172a612f0a6b5b06a0e06d3021601c", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:42186", "pull_request:42823", "pull_request:43400", "pull_request:43578", "pull_request:43920", "pull_request:44040", "pull_request:44126", "pull_request:44235", "pull_request:44330", "pull_request:44502", "pull_request:44544", "pull_request:44833", "pull_request:44834", "pull_request:45061", "pull_request:45155", "pull_request:45284", "pull_request:45336"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 44330 is the closest broad representative of one recurring theme (cache_position cleanup across multiple models), but it still does not subsume the other PRs or 
form a true duplicate cluster.", "canonical_issue_reason": null, "canonical_pr_reason": "No single canonical PR: the cluster is heterogeneous, and the apparent overlaps are only thematic rather than duplicate-level.", "confidence": 0.94, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43578", "reason": "Both touch tests/examples, but one only updates Youtu test repos while the other reruns the modular converter across many example files; different changes.", "right": "pull_request:44834"}, {"accept": false, "left": "pull_request:44235", "reason": "Fuyu tokenizer registration and modular example regeneration are unrelated changes.", "right": "pull_request:44834"}, {"accept": false, "left": "pull_request:44126", "reason": "Both relate to generation/cache handling, but 44126 simplifies prepare_inputs_for_generation while 44330 removes cache_position across model forwards; not the same concrete fix.", "right": "pull_request:44330"}, {"accept": false, "left": "pull_request:44330", "reason": "cache_position cleanup in model code is unrelated to the MLU hasattr/type-checker fix in import_utils.", "right": "pull_request:44502"}, {"accept": false, "left": "pull_request:44126", "reason": "These address different code paths: generation mask prep vs torch.mlu availability checking.", "right": "pull_request:44502"}, {"accept": false, "left": "pull_request:43920", "reason": "Updating the test fetcher to rerun cache_utils tests is not the same change as removing cache_position from models.", "right": "pull_request:44330"}, {"accept": false, "left": "pull_request:43920", "reason": "A test-selection change and a generation-input simplification are not duplicate fixes.", "right": "pull_request:44126"}, {"accept": false, "left": "pull_request:42186", "reason": "Mask slicing in eager attention and generation input preparation are both attention-related, but they fix different bugs in different paths.", "right": "pull_request:44126"}, {"accept": false, "left": 
"pull_request:42823", "reason": "LASR tokenizer mapping and PEFT adapter loading with tensor parallelism are unrelated.", "right": "pull_request:45155"}, {"accept": false, "left": "pull_request:44040", "reason": "Gemma3n audio feature fixes and modular converter/example cleanup are different changes.", "right": "pull_request:44833"}, {"accept": false, "left": "pull_request:45061", "reason": "Flash-Attn version/deprecation plumbing is unrelated to PEFT adapter loading with TP.", "right": "pull_request:45155"}, {"accept": false, "left": "pull_request:44502", "reason": "Both touch import_utils, but one fixes MLU bf16 availability and the other adds a flash-attn helper/deprecation cycle; not the same bug or change.", "right": "pull_request:45061"}, {"accept": false, "left": "pull_request:44544", "reason": "ANSI styling in loading reports and Gemma4 shared-weight removal are unrelated.", "right": "pull_request:45336"}, {"accept": false, "left": "pull_request:43400", "reason": "Adding a new tokenizer and adjusting Qwen2 CI expectations are different changes with no shared underlying fix.", "right": "pull_request:45284"}], "summary": "Mostly unrelated merged PRs with only loose thematic overlap in a few cleanup areas (generation/cache_position, import_utils, modular examples). 
None of the soft pairs look like the same underlying bug or change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43578|pull_request:44834", "pull_request:44235|pull_request:44834", "pull_request:44126|pull_request:44330", "pull_request:44330|pull_request:44502", "pull_request:44126|pull_request:44502", "pull_request:43920|pull_request:44330", "pull_request:43920|pull_request:44126", "pull_request:42186|pull_request:44126", "pull_request:42823|pull_request:45155", "pull_request:44040|pull_request:44833", "pull_request:45061|pull_request:45155", "pull_request:44502|pull_request:45061", "pull_request:44544|pull_request:45336", "pull_request:43400|pull_request:45284"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 22080, "estimated_input_tokens": 10912, "item_count": 18, "node_count": 18, "serialized_chars": 43646, "soft_pair_count": 10}, "cached_at": "2026-04-14T17:24:18Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "c708a9ea4bbcbba651ae5b25d7a464d22bc381dc64d1de8947f4904d227bf7e3", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:42823", "pull_request:43424", "pull_request:43445", "pull_request:43578", "pull_request:43588", "pull_request:43690", "pull_request:43702", "pull_request:43712", "pull_request:43913", "pull_request:43916", "pull_request:43953", "pull_request:44125", "pull_request:44293", "pull_request:44414", "pull_request:44433", "pull_request:44634", "pull_request:44866", "pull_request:45075"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 44433 is the strongest standalone bugfix in the set: it addresses a 
concrete multimodal error path across several model files and is more representative than the test/docs/typing-only PRs.", "canonical_issue_reason": null, "canonical_pr_reason": "No single canonical PR stands out because the set is not a coherent duplicate cluster; the items span separate subsystems and unrelated changes.", "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43913", "reason": "Different changes: Qwen3 VL MoE weight-mapping/transposition versus a generation input_embeds naming cleanup. Not the same code-path bug.", "right": "pull_request:43916"}, {"accept": false, "left": "pull_request:43578", "reason": "Unrelated: Youtu test repo alignment versus export cleanup internals. Shared testing context only, not the same fix.", "right": "pull_request:44293"}, {"accept": false, "left": "pull_request:43445", "reason": "MoE router corrections versus tqdm verbosity changes are distinct areas and distinct bugs.", "right": "pull_request:44414"}, {"accept": false, "left": "pull_request:44634", "reason": "Both touch lfm2 caching, but one fixes a kernel-path sequence-length bug while the other reworks cache alignment/state handling. Not the same concrete issue.", "right": "pull_request:44866"}, {"accept": false, "left": "pull_request:42823", "reason": "LASR integration test/tokenizer mapping versus executorch dynamic-shape export tests are unrelated changes.", "right": "pull_request:43424"}, {"accept": false, "left": "pull_request:43690", "reason": "One fixes a notebook/repl crash; the other is a typing/comment cleanup. 
No shared underlying bug.", "right": "pull_request:43712"}, {"accept": false, "left": "pull_request:43588", "reason": "Qwen3 Omni return-type annotation fix is unrelated to the multimodal-token-type error handling change.", "right": "pull_request:44433"}, {"accept": false, "left": "pull_request:44125", "reason": "Video processor loading error handling versus model-loading progress verbosity are different behaviors and code paths.", "right": "pull_request:44414"}, {"accept": false, "left": "pull_request:43702", "reason": "Both are model additions/docs, but they introduce different models and independent codebases; not a duplicate change.", "right": "pull_request:45075"}, {"accept": false, "left": "pull_request:43588", "reason": "Type annotation fix in Qwen3 Omni MoE versus Qwen3 VL MoE checkpoint conversion mapping are unrelated.", "right": "pull_request:43953"}], "summary": "This cluster is highly heterogeneous: it mixes unrelated model bugfixes, test-only updates, docs/model additions, and utility cleanups. 
All proposed soft edges look like false positives rather than duplicates or mergeable PRs."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43913|pull_request:43916", "pull_request:43578|pull_request:44293", "pull_request:43445|pull_request:44414", "pull_request:44634|pull_request:44866", "pull_request:42823|pull_request:43424", "pull_request:43690|pull_request:43712", "pull_request:43588|pull_request:44433", "pull_request:44125|pull_request:44414", "pull_request:43702|pull_request:45075", "pull_request:43588|pull_request:43953"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 20598, "estimated_input_tokens": 10171, "item_count": 18, "node_count": 18, "serialized_chars": 40684, "soft_pair_count": 9}, "cached_at": "2026-04-14T17:24:54Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "1dadf1a9e6ee3b16d0b1aa3e043527c74ed936731ae56234d36316e111e0991e", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:41763", "pull_request:43178", "pull_request:43424", "pull_request:43486", "pull_request:43565", "pull_request:43578", "pull_request:43588", "pull_request:43683", "pull_request:43953", "pull_request:43973", "pull_request:44293", "pull_request:44812", "pull_request:45061", "pull_request:45078", "pull_request:45123", "pull_request:45170", "pull_request:45190", "pull_request:45340"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "If forced to pick a representative, pull_request:44293 is the strongest candidate because it is a broader merged core-utility change with extensive follow-up tests. 
Even so, it is not a true cluster canonical because the items are unrelated.", "canonical_issue_reason": null, "canonical_pr_reason": "No single canonical PR: the cluster is heterogeneous, with no shared underlying feature or defect tying the PRs together.", "confidence": 0.98, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44293", "reason": "Different changes: strict export cleanup vs flash-attn compatibility/deprecation helpers in import utils. Shared file is incidental.", "right": "pull_request:45061"}, {"accept": false, "left": "pull_request:41763", "reason": "Both add model docs/registrations, but for different models (TimesFM2.5 vs Lfm2Audio). Not the same change.", "right": "pull_request:43973"}, {"accept": false, "left": "pull_request:43565", "reason": "Both touch repo-consistency workflow, but they fix different bot workflows and different failures.", "right": "pull_request:44812"}, {"accept": false, "left": "pull_request:43953", "reason": "Conversion-mapping fix for Qwen3VL-MoE is unrelated to tokenizer-auto conversion behavior.", "right": "pull_request:45078"}, {"accept": false, "left": "pull_request:43486", "reason": "Video batching fix vs state-dict loading fp32 upcasting fix; different code paths and bugs.", "right": "pull_request:43683"}, {"accept": false, "left": "pull_request:43424", "reason": "Executorch dynamic-shape export test and Youtu repo-alignment test are unrelated.", "right": "pull_request:43578"}, {"accept": false, "left": "pull_request:43178", "reason": "AMD pipeline CI fix and PP chart2table test-shape adjustment are separate test/CI changes.", "right": "pull_request:45123"}, {"accept": false, "left": "pull_request:45170", "reason": "Conversion-mapping/model rename work vs CLI typing cleanup; no shared underlying issue.", "right": "pull_request:45190"}, {"accept": false, "left": "pull_request:43588", "reason": "Qwen3 omni MoE feature-type fix and VLM conversion-mapping fix address different model internals.", "right": 
"pull_request:45340"}], "summary": "This cluster is a grab bag of unrelated PRs across model additions, workflow fixes, utilities, tests, and conversion-mapping tweaks. None of the soft pairs look like the same underlying bug or change, so they should all stay separate."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44293|pull_request:45061", "pull_request:41763|pull_request:43973", "pull_request:43565|pull_request:44812", "pull_request:43953|pull_request:45078", "pull_request:43486|pull_request:43683", "pull_request:43424|pull_request:43578", "pull_request:43178|pull_request:45123", "pull_request:45170|pull_request:45190", "pull_request:43588|pull_request:45340"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 18222, "estimated_input_tokens": 8983, "item_count": 18, "node_count": 18, "serialized_chars": 35930, "soft_pair_count": 10}, "cached_at": "2026-04-14T17:25:27Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "a814bc9be5c9db588ed0926aa7885a4f0cfd1af1ab80c54d63d57459c5fe8075", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43424", "pull_request:43426", "pull_request:43488", "pull_request:43532", "pull_request:43821", "pull_request:43919", "pull_request:43920", "pull_request:43982", "pull_request:44040", "pull_request:44163", "pull_request:44286", "pull_request:44293", "pull_request:44502", "pull_request:44544", "pull_request:44566", "pull_request:45164", "pull_request:45284", "pull_request:45340"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 45340 is the strongest standalone PR here because it fixes real 
conversion-mapping behavior in core code and is broader/more representative than the mostly test, typing, or housekeeping changes.", "canonical_issue_reason": null, "canonical_pr_reason": "No true duplicate PR cluster is evident; if forced to pick a representative, PR 45340 is the most substantive merged code change in the set and not merely a test or tooling tweak.", "confidence": 0.98, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43821", "reason": "Typo fix in PEFT quantization mapping vs. conversion-mapping updates for VLMs; different code paths and different underlying bugs.", "right": "pull_request:45340"}, {"accept": false, "left": "pull_request:44040", "reason": "Gemma3n audio feature fix is unrelated to model conversion mapping changes; no shared concrete bug.", "right": "pull_request:45340"}, {"accept": false, "left": "pull_request:44163", "reason": "ESM attention_mask/token_dropout fix is a separate model forward-path issue, not the same change as conversion mapping edits.", "right": "pull_request:45340"}, {"accept": false, "left": "pull_request:43920", "reason": "One changes test discovery for cache_utils; the other hardens export cleanup/runtime checks. 
Shared tooling theme only, not the same bug.", "right": "pull_request:44293"}, {"accept": false, "left": "pull_request:43919", "reason": "Gradient sync logic in training is unrelated to loading-report ANSI styling; only a broad repository overlap.", "right": "pull_request:44544"}, {"accept": false, "left": "pull_request:44502", "reason": "Torch bf16 availability/type-check fix and generation cache NoneType handling affect different utility functions and different failure modes.", "right": "pull_request:45164"}, {"accept": false, "left": "pull_request:43424", "reason": "ExecuTorch dynamic-shape test addition and Fouroversix quantization support are distinct features/fixes, not one mergeable PR.", "right": "pull_request:44286"}, {"accept": false, "left": "pull_request:43426", "reason": "Tokenizer cleanup restoration and CLI typing expansion are unrelated changes; shared repo area is insufficient.", "right": "pull_request:44566"}, {"accept": false, "left": "pull_request:43982", "reason": "A blame-ignore file addition and Qwen2 CI expectation updates are unrelated maintenance changes.", "right": "pull_request:45284"}, {"accept": false, "left": "pull_request:43488", "reason": "Both are '[don't merge]' or diagnostic PRs, but they modify different workflows/files and do not address the same underlying bug or change.", "right": "pull_request:43532"}], "summary": "These soft matches are superficial and the PRs are largely unrelated: most pairs touch different bugs, different models, or unrelated infrastructure changes. 
I would reject all soft edges."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43821|pull_request:45340", "pull_request:44040|pull_request:45340", "pull_request:44163|pull_request:45340", "pull_request:43920|pull_request:44293", "pull_request:43919|pull_request:44544", "pull_request:44502|pull_request:45164", "pull_request:43424|pull_request:44286", "pull_request:43426|pull_request:44566", "pull_request:43982|pull_request:45284", "pull_request:43488|pull_request:43532"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 20676, "estimated_input_tokens": 10210, "item_count": 17, "node_count": 17, "serialized_chars": 40839, "soft_pair_count": 11}, "cached_at": "2026-04-14T17:26:05Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "9e8a4db6980b412dd06f3d847b150bddb4f873b7ad64da96408a34eaf61c8db0", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43486", "pull_request:43740", "pull_request:43767", "pull_request:43793", "pull_request:43795", "pull_request:43823", "pull_request:43919", "pull_request:43930", "pull_request:44037", "pull_request:44544", "pull_request:44566", "pull_request:44601", "pull_request:44724", "pull_request:45164", "pull_request:45224", "pull_request:45289", "pull_request:45340"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "No single PR is a good global representative for this cluster; the items do not form one duplicate family.", "canonical_issue_reason": null, "canonical_pr_reason": "No canonical PR: the cluster mixes unrelated fixes and model additions across different code paths.", "confidence": 0.97, 
"soft_edge_verdicts": [{"accept": false, "left": "pull_request:44724", "reason": "Both touch auto-model mappings, but one adds missing/incorrect entries while the other removes unnecessary entries; different change intents and code paths.", "right": "pull_request:45224"}, {"accept": false, "left": "pull_request:43767", "reason": "PP-Chart2Table model support vs native pipeline-parallel support; unrelated features and not the same bug/change.", "right": "pull_request:44601"}, {"accept": false, "left": "pull_request:43793", "reason": "PP-OCRv5_mobile_rec model support is unrelated to adding native PP support in core loading.", "right": "pull_request:44601"}, {"accept": false, "left": "pull_request:43795", "reason": "PP-OCRv5_server_rec model support and native PP loading support are different changes in different subsystems.", "right": "pull_request:44601"}, {"accept": false, "left": "pull_request:43486", "reason": "Video batching with 5D arrays and gradient sync behavior are unrelated bugs.", "right": "pull_request:43919"}, {"accept": false, "left": "pull_request:44544", "reason": "ANSI styling in loading reports is unrelated to the GenerationMixin NoneType fix.", "right": "pull_request:45164"}, {"accept": false, "left": "pull_request:43740", "reason": "PyTorch version cleanup in modular_qwen2 and autocast handling improvements are different code-path fixes.", "right": "pull_request:43930"}, {"accept": false, "left": "pull_request:43823", "reason": "Adding MobileLLM support is unrelated to fixing conversion mappings for VLMs.", "right": "pull_request:45340"}, {"accept": false, "left": "pull_request:45289", "reason": "RoPE warning relaxation and VLM conversion-mapping fixes address different concerns and are not mergeable as one PR.", "right": "pull_request:45340"}, {"accept": false, "left": "pull_request:44037", "reason": "Qwen3-VL MoE weight-mapping/dim-check changes and CLI typing extensions are unrelated.", "right": "pull_request:44566"}, {"accept": false, 
"left": "pull_request:43486", "reason": "Video batching bugfix and GenerationMixin cache-prep TypeError fix do not share the same underlying issue.", "right": "pull_request:45164"}], "summary": "This cluster is heterogeneous: model support additions, auto-mapping cleanup, and unrelated bugfix/docs PRs. None of the soft pairs look like the same underlying change, so no duplicate merges are warranted."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44724|pull_request:45224", "pull_request:43767|pull_request:44601", "pull_request:43793|pull_request:44601", "pull_request:43795|pull_request:44601", "pull_request:43486|pull_request:43919", "pull_request:44544|pull_request:45164", "pull_request:43740|pull_request:43930", "pull_request:43823|pull_request:45340", "pull_request:45289|pull_request:45340", "pull_request:44037|pull_request:44566", "pull_request:43486|pull_request:45164"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 19710, "estimated_input_tokens": 9727, "item_count": 18, "node_count": 18, "serialized_chars": 38907, "soft_pair_count": 11}, "cached_at": "2026-04-14T17:26:54Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "066f64f59dcdf85bde9d01c51df19fff176499cd6222cbe3266da646c720cba5", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43424", "pull_request:43445", "pull_request:43464", "pull_request:43578", "pull_request:43592", "pull_request:43821", "pull_request:43942", "pull_request:43956", "pull_request:43989", "pull_request:44040", "pull_request:44250", "pull_request:44286", "pull_request:44330", "pull_request:44502", "pull_request:44827", 
"pull_request:44834", "pull_request:45033", "pull_request:45284"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "No single PR is a good global representative because the cluster is not a duplicate set and the changes do not converge on one underlying code path.", "canonical_issue_reason": null, "canonical_pr_reason": "No canonical PR: the cluster does not center on one concrete change; the items address distinct bugs, model-specific test updates, and unrelated utility fixes.", "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43445", "reason": "Different areas: MoE router weight mapping vs MarkupLM integration-test dtype tweak. No shared bug or mergeable combined fix.", "right": "pull_request:43464"}, {"accept": false, "left": "pull_request:43942", "reason": "Unrelated: Bark test rewrite vs training_args report_to handling. Similarity is only that both are small fixes.", "right": "pull_request:44250"}, {"accept": false, "left": "pull_request:44502", "reason": "Different scopes: type-checker guard in import_utils vs AMD-specific Qwen2 expectation updates. Not the same code-path problem.", "right": "pull_request:45284"}, {"accept": false, "left": "pull_request:44330", "reason": "Both mention cache_position removal, but one is a broad model cleanup and the other is modular example updates. Not the same concrete fix.", "right": "pull_request:44834"}, {"accept": false, "left": "pull_request:43578", "reason": "Different models and failures: Youtu repo-alignment test changes vs Bark test enablement. 
No common underlying bug.", "right": "pull_request:43942"}, {"accept": false, "left": "pull_request:43424", "reason": "Executorch dynamic-shape export test and report_to='all' regression are unrelated; only both are test/behavior fixes.", "right": "pull_request:44250"}, {"accept": false, "left": "pull_request:44286", "reason": "Fouroversix quantization/integration work and image-processing BC aliasing solve different issues in different subsystems.", "right": "pull_request:45033"}, {"accept": false, "left": "pull_request:43956", "reason": "Qwen3-VL-MoE weight mapping vs Mistral4 test fixes are unrelated model-specific changes.", "right": "pull_request:44827"}, {"accept": false, "left": "pull_request:43989", "reason": "AutoVideoProcessor lookup guard for missing torchvision and Gemma3n audio feature fixes are unrelated code paths.", "right": "pull_request:44040"}, {"accept": false, "left": "pull_request:43592", "reason": "Large config-default cleanup across many models vs a PEFT quantization attribute typo. Shared theme is general maintenance only.", "right": "pull_request:43821"}, {"accept": false, "left": "pull_request:43592", "reason": "Shared test file reference is incidental; configuration-default cleanup across models and Gemma3n get_audio_features are not the same bug.", "right": "pull_request:44040"}], "summary": "These PRs are a heterogeneous mix of unrelated fixes and test updates across different models, integrations, and utilities. 
The soft links are mostly superficial overlaps in subsystem or cleanup style, not the same underlying bug/change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43445|pull_request:43464", "pull_request:43942|pull_request:44250", "pull_request:44502|pull_request:45284", "pull_request:44330|pull_request:44834", "pull_request:43578|pull_request:43942", "pull_request:43424|pull_request:44250", "pull_request:44286|pull_request:45033", "pull_request:43956|pull_request:44827", "pull_request:43989|pull_request:44040", "pull_request:43592|pull_request:43821", "pull_request:43592|pull_request:44040"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 19572, "estimated_input_tokens": 9658, "item_count": 17, "node_count": 17, "serialized_chars": 38632, "soft_pair_count": 10}, "cached_at": "2026-04-14T17:27:27Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "bab498e312cc5523352bfb3a21c5395b9844108f3b8120e618a3a4de485b912a", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:41145", "pull_request:42668", "pull_request:43251", "pull_request:43592", "pull_request:43838", "pull_request:43913", "pull_request:43938", "pull_request:44163", "pull_request:44250", "pull_request:44739", "pull_request:44834", "pull_request:45033", "pull_request:45078", "pull_request:45190", "pull_request:45283", "pull_request:45289", "pull_request:45340"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 43592 is the strongest representative by breadth and maturity: it is merged, touches many model configs, and appears to be a substantive, finished fix rather than a narrow 
test or WIP change.", "canonical_issue_reason": null, "canonical_pr_reason": "No single canonical PR emerges: the items target different models, subsystems, and bug types, so the cluster is not a duplicate set.", "confidence": 0.98, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43592", "reason": "Different bugs in different models: config default values vs ESM attention-mask/token-dropout behavior.", "right": "pull_request:44163"}, {"accept": false, "left": "pull_request:43592", "reason": "Unrelated areas: configuration defaults vs tokenizer auto-conversion/error handling.", "right": "pull_request:45078"}, {"accept": false, "left": "pull_request:43592", "reason": "Configuration-default cleanup is not the same change as RoPE validation warning behavior.", "right": "pull_request:45289"}, {"accept": false, "left": "pull_request:43938", "reason": "Both are failing-test fixes, but for different models and different root causes; not the same underlying bug.", "right": "pull_request:44739"}, {"accept": false, "left": "pull_request:45033", "reason": "Backwards-compatibility aliasing in image processing and CLI typing fixes are unrelated changes.", "right": "pull_request:45190"}, {"accept": false, "left": "pull_request:43251", "reason": "Cross-entropy kwargs passthrough and qwen3_vl_moe weight-mapping/sentinel logic address different code paths.", "right": "pull_request:43913"}, {"accept": false, "left": "pull_request:42668", "reason": "A robust-from-pretrained processor change is not the same as updating modular example templates.", "right": "pull_request:44834"}, {"accept": false, "left": "pull_request:41145", "reason": "Both are Qwen3-related support PRs, but one adds Rope kernel support and the other adds Qwen3-ASR support; distinct features and code paths.", "right": "pull_request:43838"}, {"accept": false, "left": "pull_request:43838", "reason": "Qwen3-ASR model support and Qwen3.5 GGUF loading support are separate additions with different targets.", 
"right": "pull_request:45283"}, {"accept": false, "left": "pull_request:44250", "reason": "TrainingArgs report_to handling and conversion mapping fixes for VLMs are unrelated subsystems and bugs.", "right": "pull_request:45340"}], "summary": "This cluster is a loose grab bag of unrelated PRs: model-support additions, config/default fixes, test-only adjustments, and utility changes. I don\u2019t see any true duplicate pair among the soft edges."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43592|pull_request:44163", "pull_request:43592|pull_request:45078", "pull_request:43592|pull_request:45289", "pull_request:43938|pull_request:44739", "pull_request:45033|pull_request:45190", "pull_request:43251|pull_request:43913", "pull_request:42668|pull_request:44834", "pull_request:41145|pull_request:43838", "pull_request:43838|pull_request:45283", "pull_request:44250|pull_request:45340"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 20316, "estimated_input_tokens": 10030, "item_count": 18, "node_count": 18, "serialized_chars": 40120, "soft_pair_count": 10}, "cached_at": "2026-04-14T17:28:01Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "b4299549867765219c4b7c0de2c12183b130d0fd990a08d735a2285d6bcfff45", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:42668", "pull_request:43424", "pull_request:43445", "pull_request:43690", "pull_request:43823", "pull_request:43948", "pull_request:44082", "pull_request:44125", "pull_request:44296", "pull_request:44330", "pull_request:44433", "pull_request:44502", "pull_request:44544", "pull_request:44883", "pull_request:45033", 
"pull_request:45221", "pull_request:45284", "pull_request:45340"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #44330 is the strongest canonical representative because it is a merged, concrete runtime fix touching model forward paths across multiple architectures, unlike the other candidates which are mostly unrelated tests, docs, or narrow compatibility tweaks.", "canonical_issue_reason": null, "canonical_pr_reason": "No true duplicate PR stands out; if one representative must be chosen, PR #44330 is the most substantive merged code-path change with broad model impact and is better representative than the small test/doc/config-only edits.", "confidence": 0.95, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44433", "reason": "Different problems: multimodal RoPE error handling vs torch MLU bf16 availability guard. No shared code path or change intent.", "right": "pull_request:44502"}, {"accept": false, "left": "pull_request:42668", "reason": "Unrelated: processor-from-pretrained robustness vs adding a new MobileLLM model implementation.", "right": "pull_request:43823"}, {"accept": false, "left": "pull_request:44125", "reason": "Both are loader-related, but one is about video processor lookup errors and the other about audio-from-video container detection. 
Different subsystems and fixes.", "right": "pull_request:45221"}, {"accept": false, "left": "pull_request:44296", "reason": "Config auto-docstring generation is unrelated to AMD CI expectation updates in Qwen2 tests.", "right": "pull_request:45284"}, {"accept": false, "left": "pull_request:44883", "reason": "Different areas: dtype selection from state dict vs checkpoint conversion mappings for VLMs.", "right": "pull_request:45340"}, {"accept": false, "left": "pull_request:43690", "reason": "Notebook/repl crash guard in modeling_utils is unrelated to conversion mapping changes.", "right": "pull_request:45340"}, {"accept": false, "left": "pull_request:43445", "reason": "MoE router weight-renaming/model conversion fix is unrelated to a backwards-compatibility alias for an image-processing keyword helper.", "right": "pull_request:45033"}, {"accept": false, "left": "pull_request:43948", "reason": "Both touch model internals, but one fixes image token counting/preprocessing while the other removes cache_position in forward paths. Not the same bug or mergeable change.", "right": "pull_request:44330"}, {"accept": false, "left": "pull_request:43424", "reason": "Executorch dynamic-shape export testing is unrelated to image-processing kwargs compatibility.", "right": "pull_request:45033"}, {"accept": false, "left": "pull_request:44082", "reason": "PatchTSMixer post_init behavior and loading-report ANSI styling are unrelated changes in different components.", "right": "pull_request:44544"}], "summary": "These pull requests are broadly unrelated. The soft-similarity links are driven by shared Transformers infrastructure or adjacent model areas, but each PR fixes a different bug, adds a different feature, or updates a different test suite. 
None look like the same underlying change and they should not be merged as duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44433|pull_request:44502", "pull_request:42668|pull_request:43823", "pull_request:44125|pull_request:45221", "pull_request:44296|pull_request:45284", "pull_request:44883|pull_request:45340", "pull_request:43690|pull_request:45340", "pull_request:43445|pull_request:45033", "pull_request:43948|pull_request:44330", "pull_request:43424|pull_request:45033", "pull_request:44082|pull_request:44544"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 20006, "estimated_input_tokens": 9875, "item_count": 18, "node_count": 18, "serialized_chars": 39498, "soft_pair_count": 12}, "cached_at": "2026-04-14T17:29:22Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "ee880a691d822f33d5dba6ab0c6d0b02dd31a34d19392d7202df2c117bfaa00d", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:42028", "pull_request:43251", "pull_request:43486", "pull_request:43588", "pull_request:43702", "pull_request:43821", "pull_request:43823", "pull_request:43982", "pull_request:44040", "pull_request:44053", "pull_request:44082", "pull_request:44163", "pull_request:44236", "pull_request:44414", "pull_request:44544", "pull_request:45078", "pull_request:45155", "pull_request:45289"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 44053 is the cleanest standalone code fix in the only near-match pair; it is small, focused, and directly edits the PEFT conversion path, but the cluster as a whole is too heterogeneous to have a true canonical PR.", 
"canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.91, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43251", "reason": "Different bugs: cross-entropy kwargs vs a PEFT mapping typo. No shared code-path problem.", "right": "pull_request:43821"}, {"accept": false, "left": "pull_request:43982", "reason": "Unrelated changes: a .git-blame-ignore-revs file versus DeepSpeed zero3 initialization config handling.", "right": "pull_request:44236"}, {"accept": false, "left": "pull_request:43982", "reason": "No overlap in bug or feature scope; one is a blame-ignore file, the other is adapter loading with tensor parallelism.", "right": "pull_request:45155"}, {"accept": false, "left": "pull_request:43486", "reason": "Unrelated video batching fix versus PatchTSMixer post_init behavior. Different models and different failure modes.", "right": "pull_request:44082"}, {"accept": false, "left": "pull_request:44544", "reason": "Loading report ANSI styling is unrelated to tokenizer conversion/error handling.", "right": "pull_request:45078"}, {"accept": false, "left": "pull_request:43821", "reason": "Same file and similar typo theme, but not the same change: 43821 writes `quantization_operations`, while 44053 changes it to `quantization_operation`. 
The latter is a follow-up correction, not a duplicate mergeable PR.", "right": "pull_request:44053"}, {"accept": false, "left": "pull_request:43823", "reason": "MobileLLM model support and tqdm verbosity reduction are unrelated.", "right": "pull_request:44414"}, {"accept": false, "left": "pull_request:43588", "reason": "Qwen3 omni video-feature typing fix is unrelated to model-loading progress verbosity.", "right": "pull_request:44414"}, {"accept": false, "left": "pull_request:44040", "reason": "Gemma3n audio feature fix and tqdm verbosity are different code paths and different bugs.", "right": "pull_request:44414"}, {"accept": false, "left": "pull_request:44163", "reason": "ESM2 attention_mask/token_dropout bug is unrelated to model-loading verbosity.", "right": "pull_request:44414"}, {"accept": false, "left": "pull_request:44414", "reason": "Different subsystems and behaviors: loading progress output versus RoPE parameter validation warnings.", "right": "pull_request:45289"}, {"accept": false, "left": "pull_request:42028", "reason": "Both add model support, but for different models and unrelated implementation paths.", "right": "pull_request:43702"}], "summary": "Mostly unrelated PRs spanning model additions, docs, loading/reporting tweaks, and small bug fixes. 
The only superficially similar pair is the PEFT typo fixes, but they correct different attribute names and should not be deduplicated."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43251|pull_request:43821", "pull_request:43982|pull_request:44236", "pull_request:43982|pull_request:45155", "pull_request:43486|pull_request:44082", "pull_request:44544|pull_request:45078", "pull_request:43821|pull_request:44053", "pull_request:43823|pull_request:44414", "pull_request:43588|pull_request:44414", "pull_request:44040|pull_request:44414", "pull_request:44163|pull_request:44414", "pull_request:44414|pull_request:45289", "pull_request:42028|pull_request:43702"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 19714, "estimated_input_tokens": 9729, "item_count": 18, "node_count": 18, "serialized_chars": 38913, "soft_pair_count": 11}, "cached_at": "2026-04-14T17:33:45Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "2fb65cbc18d9a9689d84d7f594c8e7ba15d59e278dcc42a2d97d888ba69e5c04", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43251", "pull_request:43426", "pull_request:43711", "pull_request:43823", "pull_request:43913", "pull_request:43973", "pull_request:43982", "pull_request:44126", "pull_request:44163", "pull_request:44250", "pull_request:44293", "pull_request:44386", "pull_request:44801", "pull_request:44827", "pull_request:45044", "pull_request:45170", "pull_request:45221", "pull_request:45283"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 43251 is the best overall representative for the cluster because it is the most 
self-contained, clearly bug-driven, and explicitly linked to an issue; the other PRs are broader feature additions or unrelated fixes.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 43251 is the cleanest representative: it is a narrow, concrete bug fix in one code path, and it explicitly targets issue 43240.", "confidence": 0.74, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44386", "reason": "Both touch generation/audio sampling-related code, but they fix different problems: Higgs Audio V2 sampling logic vs a user-friendly error for video files passed to audio loading.", "right": "pull_request:45221"}, {"accept": false, "left": "pull_request:43823", "reason": "A new MobileLLM model implementation and a Qwen3 VL MoE weight-mapping fix are unrelated changes in different model families and code paths.", "right": "pull_request:43913"}, {"accept": false, "left": "pull_request:44126", "reason": "One simplifies generation input preparation; the other fixes a LayerNorm naming/renaming issue across CLIP-like models. 
No shared underlying bug.", "right": "pull_request:45170"}, {"accept": false, "left": "pull_request:44250", "reason": "Report-to-all regression handling and tokenizer class mapping for DeepSeek/ModernBERT are unrelated; same broad area of config handling only.", "right": "pull_request:44801"}, {"accept": false, "left": "pull_request:43973", "reason": "Lfm2 audio model addition and Qwen3.5 GGUF loading support are separate feature additions for different model families.", "right": "pull_request:45283"}, {"accept": false, "left": "pull_request:43913", "reason": "Qwen3 VL MoE checkpoint mapping and strict export cleanup address different subsystems and bugs; not mergeable into one fix.", "right": "pull_request:44293"}, {"accept": false, "left": "pull_request:44163", "reason": "ESM2 attention-mask/token-dropout fix and tokenizer hub-class mapping fix are unrelated model-specific issues.", "right": "pull_request:44801"}, {"accept": false, "left": "pull_request:43711", "reason": "Scheduler initialization order and report_to=\"all\" regression fixes are distinct trainer/integration bugs with no common code-path.", "right": "pull_request:44250"}, {"accept": false, "left": "pull_request:43251", "reason": "Cross-entropy kwargs passthrough and Janus image-generation config handling are unrelated fixes in different modules.", "right": "pull_request:45044"}, {"accept": false, "left": "pull_request:43426", "reason": "Restoring clean_up_tokenization_spaces in the tokenizers backend and Mistral4 test fixes are not the same bug or change.", "right": "pull_request:44827"}, {"accept": false, "left": "pull_request:43426", "reason": "A tokenization cleanup behavior fix and creating .git-blame-ignore-revs are unrelated maintenance changes.", "right": "pull_request:43982"}], "summary": "This cluster is a grab bag of unrelated PRs rather than a duplicate set. 
The soft pairs mostly share only broad transformer-domain vocabulary, not the same underlying bug or change, so all soft edges should be rejected."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44386|pull_request:45221", "pull_request:43823|pull_request:43913", "pull_request:44126|pull_request:45170", "pull_request:44250|pull_request:44801", "pull_request:43973|pull_request:45283", "pull_request:43913|pull_request:44293", "pull_request:44163|pull_request:44801", "pull_request:43711|pull_request:44250", "pull_request:43251|pull_request:45044", "pull_request:43426|pull_request:44827", "pull_request:43426|pull_request:43982"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 18530, "estimated_input_tokens": 9137, "item_count": 18, "node_count": 18, "serialized_chars": 36546, "soft_pair_count": 12}, "cached_at": "2026-04-14T17:34:30Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "0d05a31116eec4099d6fdb7b7bfd0987ae92069b9524b43eaa392fe82f2a106a", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:42823", "pull_request:43426", "pull_request:43532", "pull_request:43563", "pull_request:43588", "pull_request:43690", "pull_request:43953", "pull_request:43982", "pull_request:43995", "pull_request:44082", "pull_request:44163", "pull_request:44414", "pull_request:44566", "pull_request:44801", "pull_request:44827", "pull_request:44883", "pull_request:45164", "pull_request:45289"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "No best PR is appropriate because there is no shared underlying bug/change across the cluster.", 
"canonical_issue_reason": null, "canonical_pr_reason": "No single PR cleanly represents a duplicate cluster here; the items are unrelated rather than variants of one fix.", "confidence": 0.99, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43532", "reason": "Different CI/workflow change vs an isolated test file style-edit prank; not the same bug or change.", "right": "pull_request:43563"}, {"accept": false, "left": "pull_request:44566", "reason": "Typing/CLI refactor is unrelated to tokenizer-class hub mapping fixes.", "right": "pull_request:44801"}, {"accept": false, "left": "pull_request:43953", "reason": "Qwen3VL-MoE conversion mapping and PatchTSMixer post-init are separate model-specific fixes.", "right": "pull_request:44082"}, {"accept": false, "left": "pull_request:43426", "reason": "Tokenizers backend cleanup behavior and dtype guessing in state-dict loading address unrelated code paths.", "right": "pull_request:44883"}, {"accept": false, "left": "pull_request:43426", "reason": "Tokenization cleanup and ESM attention_mask/token_dropout are unrelated tokenizer vs model-forward fixes.", "right": "pull_request:44163"}, {"accept": false, "left": "pull_request:43426", "reason": "Tokenizers cleanup restoration is unrelated to RoPE parameter warning validation.", "right": "pull_request:45289"}, {"accept": false, "left": "pull_request:43426", "reason": "Tokenization cleanup and Falcon output-collection refactor do not share a concrete bug.", "right": "pull_request:43995"}, {"accept": false, "left": "pull_request:42823", "reason": "LASR tokenizer/test fix is unrelated to adding a git-blame-ignore-revs file.", "right": "pull_request:43982"}, {"accept": false, "left": "pull_request:43690", "reason": "Notebook/repl crash guard in modeling_utils is unrelated to Qwen3VL-MoE conversion mapping.", "right": "pull_request:43953"}, {"accept": false, "left": "pull_request:44801", "reason": "Incorrect tokenizer class mapping and RoPE warning suppression are 
separate changes.", "right": "pull_request:45289"}, {"accept": false, "left": "pull_request:44414", "reason": "Model-loading tqdm verbosity and Mistral4 test/model fixes are not the same underlying issue.", "right": "pull_request:44827"}, {"accept": false, "left": "pull_request:43588", "reason": "Qwen3 omni feature return-type fix and GenerationMixin NoneType handling are unrelated runtime bugs.", "right": "pull_request:45164"}], "summary": "This cluster is heterogeneous: it mixes unrelated PRs across model fixes, tokenizer mapping, typing, workflows, and tests. I found no true duplicate pair; all soft edges should be rejected."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43532|pull_request:43563", "pull_request:44566|pull_request:44801", "pull_request:43953|pull_request:44082", "pull_request:43426|pull_request:44883", "pull_request:43426|pull_request:44163", "pull_request:43426|pull_request:45289", "pull_request:43426|pull_request:43995", "pull_request:42823|pull_request:43982", "pull_request:43690|pull_request:43953", "pull_request:44801|pull_request:45289", "pull_request:44414|pull_request:44827", "pull_request:43588|pull_request:45164"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 17942, "estimated_input_tokens": 8843, "item_count": 17, "node_count": 17, "serialized_chars": 35372, "soft_pair_count": 10}, "cached_at": "2026-04-14T17:34:54Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "64fbab37e19e95ce6596f7e316c4327342a8640309ce8657d1f99adec0a1eb85", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43488", "pull_request:43500", 
"pull_request:43558", "pull_request:43592", "pull_request:43889", "pull_request:43946", "pull_request:43995", "pull_request:44126", "pull_request:44414", "pull_request:44417", "pull_request:44620", "pull_request:44634", "pull_request:44724", "pull_request:44827", "pull_request:44883", "pull_request:45284", "pull_request:45340"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "If forced to pick the most representative PR, 45340 is the strongest standalone substantive fix, but it still is not a canonical duplicate target for the rest of the cluster.", "canonical_issue_reason": null, "canonical_pr_reason": "No single canonical PR stands out because the items are not duplicates of one underlying change; they cover unrelated fixes, refactors, test-only edits, and bot/CI experiments.", "confidence": 0.95, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44620", "reason": "Different problems: one fixes Transformers Serve request handling/422 behavior, the other only updates Qwen2 test expectations for AMD CI.", "right": "pull_request:45284"}, {"accept": false, "left": "pull_request:43995", "reason": "Falcon output-collection refactor is unrelated to reducing tqdm verbosity during model loading; different code paths and goals.", "right": "pull_request:44414"}, {"accept": false, "left": "pull_request:44827", "reason": "Mistral4 test/model work is unrelated to conversion-mapping fixes for VLMs; not the same underlying change.", "right": "pull_request:45340"}, {"accept": false, "left": "pull_request:44126", "reason": "Generation input prep simplification and Neuron kernels integration touch different functionality and are not mergeable as one fix.", "right": "pull_request:44417"}, {"accept": false, "left": "pull_request:43889", "reason": "Jamba fast/slow-path fallback and LFM2 kernel-path cache handling are separate model-specific fixes.", "right": "pull_request:44634"}, {"accept": false, "left": "pull_request:44620", "reason": "Serve 
422 handling and Mistral4 test/model changes are unrelated; they do not address the same bug or feature.", "right": "pull_request:44827"}, {"accept": false, "left": "pull_request:43500", "reason": "Both touch the same test file, but one is a bot-permission/style check and the other is a style-bot edit; no shared underlying defect.", "right": "pull_request:43558"}, {"accept": false, "left": "pull_request:43946", "reason": "Dockerfile dependency cleanup and auto-file registry updates are unrelated repository maintenance changes.", "right": "pull_request:44724"}, {"accept": false, "left": "pull_request:43592", "reason": "Configuration default-value cleanup and dtype guessing logic are separate issues in different parts of the library.", "right": "pull_request:44883"}, {"accept": false, "left": "pull_request:43488", "reason": "Both are repo-bot/check PRs, but they are different experiments with different edits and no common concrete bug or change.", "right": "pull_request:43558"}], "summary": "This cluster is mostly a set of unrelated PRs that only look superficially similar by soft similarity. 
They span separate subsystems and change different code paths, so none should be merged as duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44620|pull_request:45284", "pull_request:43995|pull_request:44414", "pull_request:44827|pull_request:45340", "pull_request:44126|pull_request:44417", "pull_request:43889|pull_request:44634", "pull_request:44620|pull_request:44827", "pull_request:43500|pull_request:43558", "pull_request:43946|pull_request:44724", "pull_request:43592|pull_request:44883", "pull_request:43488|pull_request:43558"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 21772, "estimated_input_tokens": 10758, "item_count": 17, "node_count": 17, "serialized_chars": 43029, "soft_pair_count": 11}, "cached_at": "2026-04-14T17:35:33Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "366e238eea3e6438967169c50561a67e89822a3389bcb5d978e79e116a5b956e", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:41145", "pull_request:42781", "pull_request:43690", "pull_request:43702", "pull_request:43711", "pull_request:43942", "pull_request:44296", "pull_request:44320", "pull_request:44390", "pull_request:44414", "pull_request:44760", "pull_request:44801", "pull_request:44859", "pull_request:44883", "pull_request:45170", "pull_request:45284", "pull_request:45350"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "No PR in this cluster is a strong global representative; the closest are broad model-addition PRs, but they target different models and cannot serve as a single canonical change.", "canonical_issue_reason": null, 
"canonical_pr_reason": "No clear canonical PR: the items span unrelated changes rather than one shared fix or feature.", "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43942", "reason": "Bark test adjustments and config auto-docstring generation are unrelated changes affecting different subsystems.", "right": "pull_request:44296"}, {"accept": false, "left": "pull_request:41145", "reason": "Both mention rope/Qwen3-like terms, but they target different models and different code paths; not the same change.", "right": "pull_request:44859"}, {"accept": false, "left": "pull_request:43711", "reason": "Scheduler initialization fix vs Qwen2 test expectation updates are unrelated.", "right": "pull_request:45284"}, {"accept": false, "left": "pull_request:44414", "reason": "Model-loading tqdm verbosity and dtype guessing in state-dict loading are distinct bugs in different functions.", "right": "pull_request:44883"}, {"accept": false, "left": "pull_request:44801", "reason": "Tokenizer backend mapping fixes are unrelated to dtype inference.", "right": "pull_request:44883"}, {"accept": false, "left": "pull_request:43690", "reason": "Notebook/Repl custom-model crash fix is unrelated to tokenizer-class hub mapping.", "right": "pull_request:44801"}, {"accept": false, "left": "pull_request:44760", "reason": "Both add model support and touch auto-registry files, but they are for different model families with different implementations.", "right": "pull_request:45350"}, {"accept": false, "left": "pull_request:45170", "reason": "A spelling/renaming fix in CLIP-like conversion is unrelated to Qwen2 expectation changes.", "right": "pull_request:45284"}, {"accept": false, "left": "pull_request:44390", "reason": "NemotronH and Mistral4 are separate model integrations, not the same underlying PR.", "right": "pull_request:44760"}, {"accept": false, "left": "pull_request:42781", "reason": "Both are streaming/audio-related model additions, but they introduce 
different models and code paths.", "right": "pull_request:43702"}, {"accept": false, "left": "pull_request:43702", "reason": "Moonshine streaming and SAM3-LiteText are unrelated model additions.", "right": "pull_request:44320"}], "summary": "This cluster is heterogeneous: model additions for different architectures, unrelated infra fixes, and test/doc updates. The soft-similarity links are mostly driven by shared vocabulary like model registration or audio/rope/auto files, but they do not describe the same underlying bug or change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43942|pull_request:44296", "pull_request:41145|pull_request:44859", "pull_request:43711|pull_request:45284", "pull_request:44414|pull_request:44883", "pull_request:44801|pull_request:44883", "pull_request:43690|pull_request:44801", "pull_request:44760|pull_request:45350", "pull_request:45170|pull_request:45284", "pull_request:44390|pull_request:44760", "pull_request:42781|pull_request:43702", "pull_request:43702|pull_request:44320"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 20016, "estimated_input_tokens": 9880, "item_count": 18, "node_count": 18, "serialized_chars": 39520, "soft_pair_count": 11}, "cached_at": "2026-04-14T17:36:06Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "8c75099f01542f68094e3b82a96b3bb61045c51e7d54f8ff23e80883025b46cf", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43251", "pull_request:43553", "pull_request:43558", "pull_request:43565", "pull_request:43588", "pull_request:43712", "pull_request:43838", "pull_request:43995", "pull_request:44125", 
"pull_request:44293", "pull_request:44390", "pull_request:44827", "pull_request:44883", "pull_request:44990", "pull_request:45044", "pull_request:45139", "pull_request:45155", "pull_request:45170"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 43838 has the strongest standalone triage value because it is a well-scoped feature addition with explicit tracking, substantial implementation, and test coverage. The other PRs are smaller, narrower, or clearly unrelated maintenance changes.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 43838 is the clearest substantive feature PR in the set, with an explicit issue target and broad, coherent scope; it is the best representative if one must be chosen, though it is not a duplicate of the others.", "confidence": 0.87, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44125", "reason": "Video processor loading error handling and PEFT adapter loading are different code paths and different bugs; no plausible single merged fix.", "right": "pull_request:45155"}, {"accept": false, "left": "pull_request:44990", "reason": "One is a small vLLM compatibility/deprecation helper change, the other is a RoPE/docs/cache-related cleanup; they are unrelated changes.", "right": "pull_request:45139"}, {"accept": false, "left": "pull_request:43838", "reason": "Both are large model-support PRs, but for different models and different feature areas (Qwen3-ASR vs Nemotron-H); not the same underlying change.", "right": "pull_request:44390"}, {"accept": false, "left": "pull_request:43251", "reason": "Passing cross-entropy kwargs and ignoring float8/float4 dtypes in state-dict dtype guessing are separate utility fixes with no shared bug.", "right": "pull_request:44883"}, {"accept": false, "left": "pull_request:44827", "reason": "Mistral4 test fixes and a layernorm renaming/conversion mapping change are unrelated.", "right": "pull_request:45170"}, {"accept": false, "left": "pull_request:43712", 
"reason": "General typing/comment cleanup in modeling_utils is not the same as a Falcon output-interface refactor.", "right": "pull_request:43995"}, {"accept": false, "left": "pull_request:43251", "reason": "Cross-entropy keyword plumbing and Qwen3 Omni feature return typing are unrelated code paths.", "right": "pull_request:43588"}, {"accept": false, "left": "pull_request:43558", "reason": "Both touch CI/workflows, but they fix different bot workflows and are not the same underlying change.", "right": "pull_request:43565"}, {"accept": false, "left": "pull_request:44293", "reason": "Strict export cleanup across many models is unrelated to the layrnorm-to-layernorm conversion fix.", "right": "pull_request:45170"}, {"accept": false, "left": "pull_request:43995", "reason": "Falcon standardized output collection refactor and Janus image-generation bug fix are different models and different problems.", "right": "pull_request:45044"}, {"accept": false, "left": "pull_request:43553", "reason": "These are different PRs about style-bot-related changes; one adds a workflow while the other is a test-file style tweak, not the same change.", "right": "pull_request:43558"}], "summary": "This cluster is a mixed bag of unrelated PRs; the soft pairs are mostly superficial similarity matches and do not look like duplicate fixes or the same change set. 
All soft edges should be rejected."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44125|pull_request:45155", "pull_request:44990|pull_request:45139", "pull_request:43838|pull_request:44390", "pull_request:43251|pull_request:44883", "pull_request:44827|pull_request:45170", "pull_request:43712|pull_request:43995", "pull_request:43251|pull_request:43588", "pull_request:43558|pull_request:43565", "pull_request:44293|pull_request:45170", "pull_request:43995|pull_request:45044", "pull_request:43553|pull_request:43558"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 20206, "estimated_input_tokens": 9975, "item_count": 18, "node_count": 18, "serialized_chars": 39897, "soft_pair_count": 11}, "cached_at": "2026-04-14T17:36:49Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "3e58fac9de568e0654392d669a51f17d544a35d01bc16db3e7a507684d53c698", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:40755", "pull_request:41145", "pull_request:41224", "pull_request:42993", "pull_request:43385", "pull_request:43555", "pull_request:43558", "pull_request:43579", "pull_request:43707", "pull_request:43778", "pull_request:43823", "pull_request:43858", "pull_request:43942", "pull_request:43948", "pull_request:44417", "pull_request:44827", "pull_request:44883", "pull_request:45285"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "If a single representative is needed, PR 43385 (UVDoc model support) is the strongest standalone example because it is merged, substantial, and broadly scoped across docs, auto classes, modeling, and tests. 
It is still not a duplicate representative of the rest.", "canonical_issue_reason": null, "canonical_pr_reason": "No clear canonical PR: the items span different models, tools, and bug fixes with no shared underlying change or code path.", "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43555", "reason": "Different artifacts: workflow permission change vs. unrelated test-file style tweak. Same bot-related theme only superficially.", "right": "pull_request:43558"}, {"accept": false, "left": "pull_request:43942", "reason": "Different bugs in different code paths: Bark test behavior vs Emu3 image preprocessing/image-token counting.", "right": "pull_request:43948"}, {"accept": false, "left": "pull_request:43778", "reason": "One changes Mamba/FalconMamba mixer initialization; the other adds a new MobileLLM model. Not the same change.", "right": "pull_request:43823"}, {"accept": false, "left": "pull_request:43778", "reason": "Mixer weight initialization and dtype inference from state dict are unrelated fixes.", "right": "pull_request:44883"}, {"accept": false, "left": "pull_request:42993", "reason": "Benchmark_v2 Intel XPU platform support is unrelated to Solar Open test XPU coverage.", "right": "pull_request:43579"}, {"accept": false, "left": "pull_request:44417", "reason": "Neuron kernel integration is a separate feature from Mistral4 test fixes and model registration.", "right": "pull_request:44827"}, {"accept": false, "left": "pull_request:43858", "reason": "GLM MoE DSA model support and Gemma4 export/cache handling are unrelated workstreams.", "right": "pull_request:45285"}, {"accept": false, "left": "pull_request:40755", "reason": "TimesFM covariate forecasting and UVDoc model support are different model features with no shared bug/change.", "right": "pull_request:43385"}, {"accept": false, "left": "pull_request:40755", "reason": "TimesFM covariate support is unrelated to SLANeXt model support.", "right": "pull_request:43707"}, 
{"accept": false, "left": "pull_request:40755", "reason": "TimesFM forecasting with covariates and DINOv3 image-classification support are unrelated.", "right": "pull_request:41224"}, {"accept": false, "left": "pull_request:40755", "reason": "TimesFM covariate support is unrelated to Qwen3 rope kernel support.", "right": "pull_request:41145"}], "summary": "This cluster is not a duplicate set; it mixes unrelated model additions, infrastructure changes, benchmark/test updates, and bug fixes. All soft-link candidates are superficial and should be rejected."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43555|pull_request:43558", "pull_request:43942|pull_request:43948", "pull_request:43778|pull_request:43823", "pull_request:43778|pull_request:44883", "pull_request:42993|pull_request:43579", "pull_request:44417|pull_request:44827", "pull_request:43858|pull_request:45285", "pull_request:40755|pull_request:43385", "pull_request:40755|pull_request:43707", "pull_request:40755|pull_request:41224", "pull_request:40755|pull_request:41145"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 22304, "estimated_input_tokens": 11024, "item_count": 18, "node_count": 18, "serialized_chars": 44094, "soft_pair_count": 10}, "cached_at": "2026-04-14T17:37:19Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "4033e7ddbe11f9132be408ecf7d6d06099f658c45517f156c206d072b97ba8c0", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:39895", "pull_request:41272", "pull_request:43424", "pull_request:43702", "pull_request:43946", "pull_request:43948", "pull_request:43955", "pull_request:44051", 
"pull_request:44390", "pull_request:44502", "pull_request:44544", "pull_request:44739", "pull_request:44808", "pull_request:44827", "pull_request:45213", "pull_request:45261", "pull_request:45284", "pull_request:45394"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "No single PR is a good canonical representative: the set does not form a duplicate cluster, and the changes span unrelated models, tooling, CI, and tests.", "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44390", "reason": "Both add model support, but for different models and codepaths; shared auto/doc wiring is too broad to indicate a duplicate change.", "right": "pull_request:44808"}, {"accept": false, "left": "pull_request:45261", "reason": "Unrelated fixes: CircleCI workflow null-handling vs Qwen2 AMD test expectations.", "right": "pull_request:45284"}, {"accept": false, "left": "pull_request:44051", "reason": "Different models and different issues: Gemma3n type hints vs Mistral4 test/model adjustments.", "right": "pull_request:44827"}, {"accept": false, "left": "pull_request:43946", "reason": "Both touch infrastructure/kernel packaging, but one is Docker dependency cleanup and the other is kernel version mapping logic; not the same concrete fix.", "right": "pull_request:43955"}, {"accept": false, "left": "pull_request:44502", "reason": "A type-checker guard in import_utils is unrelated to the model-creation skill and model-scaffold files.", "right": "pull_request:45213"}, {"accept": false, "left": "pull_request:43948", "reason": "An image-token preprocessing bug fix is unrelated to Mistral4 test failures and model integration changes.", "right": "pull_request:44827"}, {"accept": false, "left": "pull_request:41272", "reason": "Both add model docs/code, but they are distinct model integrations with no shared bug or shared concrete fix.", "right": "pull_request:43702"}, 
{"accept": false, "left": "pull_request:44544", "reason": "ANSI/style fixes in loading reports are unrelated to the model-creation skill PR.", "right": "pull_request:45213"}, {"accept": false, "left": "pull_request:39895", "reason": "VideoPrism model addition is unrelated to an Executorch dynamic-shape export test.", "right": "pull_request:43424"}, {"accept": false, "left": "pull_request:44739", "reason": "Different model-specific fixes: Janus processor/test changes vs X-CLIP processor video handling.", "right": "pull_request:45394"}], "summary": "These PRs are a heterogeneous set of unrelated changes: new model additions, doc updates, CI/Docker fixes, test expectation tweaks, and small bug fixes. None of the soft-edge pairs look like the same underlying bug or change, so no duplicate merges are justified."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44390|pull_request:44808", "pull_request:45261|pull_request:45284", "pull_request:44051|pull_request:44827", "pull_request:43946|pull_request:43955", "pull_request:44502|pull_request:45213", "pull_request:43948|pull_request:44827", "pull_request:41272|pull_request:43702", "pull_request:44544|pull_request:45213", "pull_request:39895|pull_request:43424", "pull_request:44739|pull_request:45394"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 22860, "estimated_input_tokens": 11302, "item_count": 18, "node_count": 18, "serialized_chars": 45207, "soft_pair_count": 18}, "cached_at": "2026-04-14T17:38:11Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "e4d42fee181df67e26547486e2def79aef77da846fdd7314f404bafc0781e129", 
"prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:41763", "pull_request:43486", "pull_request:43838", "pull_request:43858", "pull_request:44386", "pull_request:44395", "pull_request:44400", "pull_request:44413", "pull_request:44542", "pull_request:44633", "pull_request:44634", "pull_request:44760", "pull_request:45187", "pull_request:45213", "pull_request:45283", "pull_request:45285", "pull_request:45298", "pull_request:45429"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.98, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:41763", "reason": "Different model additions; they share scaffold files but not the same model or code path.", "right": "pull_request:45298"}, {"accept": false, "left": "pull_request:43486", "reason": "Video batching bugfix vs new-model skill scaffold; unrelated change types.", "right": "pull_request:45213"}, {"accept": false, "left": "pull_request:41763", "reason": "Unrelated additions: TimesFM 2.5 vs Qwen3.5 GGUF loading support.", "right": "pull_request:45283"}, {"accept": false, "left": "pull_request:44386", "reason": "Same model family, but one fixes sampling and the other a torchaudio backend dependency; different problems.", "right": "pull_request:44542"}, {"accept": false, "left": "pull_request:44542", "reason": "Backend dependency fix vs Gemma4 export/integration changes; different code paths.", "right": "pull_request:45285"}, {"accept": false, "left": "pull_request:44400", "reason": "Formatting in loading_report is unrelated to the Higgs Audio tokenizer dependency fix.", "right": "pull_request:44542"}, {"accept": false, "left": "pull_request:45187", "reason": "Temp-file handling in CLI serving is unrelated to a workflow gating change.", "right": "pull_request:45429"}, {"accept": false, "left": "pull_request:44395", "reason": "Kernels security policy vs PEFT conversion mapping; different 
subsystems and bugs.", "right": "pull_request:44413"}, {"accept": false, "left": "pull_request:44395", "reason": "Security gating for kernel loading is not the same issue as the LFM2 kernel path fix.", "right": "pull_request:44634"}, {"accept": false, "left": "pull_request:44413", "reason": "PEFT mapping logic and LFM2 kernel-path handling are unrelated.", "right": "pull_request:44634"}, {"accept": false, "left": "pull_request:44386", "reason": "Audio sampling fix and kernels security fix are unrelated code paths.", "right": "pull_request:44395"}, {"accept": false, "left": "pull_request:44386", "reason": "Higgs Audio sampling fix vs PEFT mapping fix; no shared underlying bug.", "right": "pull_request:44413"}, {"accept": false, "left": "pull_request:44386", "reason": "Audio generation sampling issue is unrelated to LFM2 kernel-path behavior.", "right": "pull_request:44634"}, {"accept": false, "left": "pull_request:44395", "reason": "Kernel-loading security change is unrelated to Gemma4 export support.", "right": "pull_request:45285"}, {"accept": false, "left": "pull_request:44413", "reason": "PEFT conversion mappings and Gemma4 export are different features.", "right": "pull_request:45285"}, {"accept": false, "left": "pull_request:44634", "reason": "LFM2 kernel-path fix and Gemma4 export tests touch different models and failures.", "right": "pull_request:45285"}, {"accept": false, "left": "pull_request:43838", "reason": "Both are model support PRs, but for different model families and not the same concrete change.", "right": "pull_request:44760"}, {"accept": false, "left": "pull_request:43858", "reason": "GlmMoeDsa model addition and MedASR doc update are unrelated.", "right": "pull_request:44633"}], "summary": "This cluster is a loose thematic bundle of unrelated PRs: several distinct model additions and a handful of isolated bugfixes/docs/workflow changes. 
None of the soft edges look like true duplicates or the same underlying change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:41763|pull_request:45298", "pull_request:43486|pull_request:45213", "pull_request:41763|pull_request:45283", "pull_request:44386|pull_request:44542", "pull_request:44542|pull_request:45285", "pull_request:44400|pull_request:44542", "pull_request:45187|pull_request:45429", "pull_request:44395|pull_request:44413", "pull_request:44395|pull_request:44634", "pull_request:44413|pull_request:44634", "pull_request:44386|pull_request:44395", "pull_request:44386|pull_request:44413", "pull_request:44386|pull_request:44634", "pull_request:44395|pull_request:45285", "pull_request:44413|pull_request:45285", "pull_request:44634|pull_request:45285", "pull_request:43838|pull_request:44760", "pull_request:43858|pull_request:44633"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 23842, "estimated_input_tokens": 11793, "item_count": 18, "node_count": 18, "serialized_chars": 47171, "soft_pair_count": 21}, "cached_at": "2026-04-14T17:38:50Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "24ecb8123f75cb3b93406865bd2294b78270111c18dee9101b9d05fbb86672c3", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:42848", "pull_request:43173", "pull_request:43623", "pull_request:43858", "pull_request:43973", "pull_request:43995", "pull_request:44395", "pull_request:44413", "pull_request:44542", "pull_request:44633", "pull_request:44770", "pull_request:44808", "pull_request:44827", "pull_request:44866", "pull_request:44972", "pull_request:45213", "pull_request:45286", 
"pull_request:45410"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 44808 is the best overall representative of the cluster because it is substantive, merged, and broadly integrated across docs, auto mappings, implementation, and tests. The other PRs are narrower bugfixes, test-only adjustments, or unrelated model-specific changes.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 44808 is the strongest representative: it is a merged, comprehensive model-addition change with docs, auto-class wiring, modeling files, and tests, plus an explicit issue target. Compared with the other candidates, it is the most complete and central code-change artifact.", "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43623", "reason": "Different models and different changes: Zamba weight tying fix vs. adding GlmMoeDsa. No shared code path or underlying bug.", "right": "pull_request:43858"}, {"accept": false, "left": "pull_request:43173", "reason": "A flaky Timm backbone test tolerance tweak is unrelated to a new GLM model addition.", "right": "pull_request:43858"}, {"accept": false, "left": "pull_request:42848", "reason": "Generic attention-mask interface refactor across many models, not the same as adding GlmMoeDsa.", "right": "pull_request:43858"}, {"accept": false, "left": "pull_request:43858", "reason": "Both are model-related, but they add different architectures and fix different code paths; not mergeable as one duplicate change.", "right": "pull_request:44866"}, {"accept": false, "left": "pull_request:43858", "reason": "GlmMoeDsa addition and AMD CI expectation updates are unrelated.", "right": "pull_request:44972"}, {"accept": false, "left": "pull_request:43858", "reason": "Model addition vs. 
backend dependency fix; different underlying issue.", "right": "pull_request:44542"}, {"accept": false, "left": "pull_request:44542", "reason": "Backend dependency annotation fix is unrelated to LASR documentation updates.", "right": "pull_request:44633"}, {"accept": false, "left": "pull_request:43623", "reason": "Weight-tying fix in Zamba is unrelated to a torchaudio backend dependency change.", "right": "pull_request:44542"}, {"accept": false, "left": "pull_request:43623", "reason": "Zamba weight tying and MedASR doc updates are completely different changes.", "right": "pull_request:44633"}, {"accept": false, "left": "pull_request:44413", "reason": "PEFT conversion mapping fix does not match a documentation-only model update.", "right": "pull_request:44633"}, {"accept": false, "left": "pull_request:43995", "reason": "Falcon output-interface refactor is unrelated to the model-creation skill PR.", "right": "pull_request:45213"}, {"accept": false, "left": "pull_request:44395", "reason": "Kernel security hardening and LASR docs are unrelated.", "right": "pull_request:44633"}, {"accept": false, "left": "pull_request:44413", "reason": "PEFT mapping fix and MedASR docs do not address the same bug or change.", "right": "pull_request:44633"}, {"accept": false, "left": "pull_request:43623", "reason": "Zamba weight tying fix is unrelated to the kernels security fix.", "right": "pull_request:44395"}, {"accept": false, "left": "pull_request:43623", "reason": "Different subsystems and problems: Zamba weights vs PEFT conversion mapping.", "right": "pull_request:44413"}, {"accept": false, "left": "pull_request:43973", "reason": "Both are model-introduction PRs, but for different models and different integration work; no shared underlying bug/change.", "right": "pull_request:44808"}, {"accept": false, "left": "pull_request:44827", "reason": "Mistral4 test fixes are unrelated to the model-creation skill/pr scaffolding PR.", "right": "pull_request:45213"}, {"accept": false, "left": 
"pull_request:44770", "reason": "Strict config fixes for several models and an AltCLIP test fix are unrelated.", "right": "pull_request:45410"}, {"accept": false, "left": "pull_request:44770", "reason": "Different flaky-test/config issues in different models; not the same underlying bug.", "right": "pull_request:45286"}], "summary": "This is a heterogeneous set of PRs: model additions, test-threshold tweaks, refactors, doc updates, and a few bugfixes. None of the soft pairs appear to be the same underlying change or bug; they mostly just share broad subsystem themes like models, autos, or tests."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43623|pull_request:43858", "pull_request:43173|pull_request:43858", "pull_request:42848|pull_request:43858", "pull_request:43858|pull_request:44866", "pull_request:43858|pull_request:44972", "pull_request:43858|pull_request:44542", "pull_request:44542|pull_request:44633", "pull_request:43623|pull_request:44542", "pull_request:43623|pull_request:44633", "pull_request:43173|pull_request:44542", "pull_request:43173|pull_request:44633", "pull_request:43995|pull_request:45213", "pull_request:43858|pull_request:44395", "pull_request:44395|pull_request:44633", "pull_request:44413|pull_request:44633", "pull_request:43623|pull_request:44395", "pull_request:43623|pull_request:44413", "pull_request:43973|pull_request:44808", "pull_request:44827|pull_request:45213", "pull_request:44770|pull_request:45410", "pull_request:44770|pull_request:45286"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 21698, "estimated_input_tokens": 10721, "item_count": 18, "node_count": 18, "serialized_chars": 42881, "soft_pair_count": 13}, "cached_at": "2026-04-14T17:39:19Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", 
"hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "05376ad84aafcc8892affe53bdb81970a76c04cca93f52390877be182f545acb", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:40820", "pull_request:43178", "pull_request:43385", "pull_request:43498", "pull_request:43649", "pull_request:43677", "pull_request:43707", "pull_request:43936", "pull_request:44390", "pull_request:44739", "pull_request:44770", "pull_request:45157", "pull_request:45286", "pull_request:45287", "pull_request:45288", "pull_request:45298", "pull_request:45403", "pull_request:45410"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "No PR in this set is suitable as a global representative because there is no shared underlying bug/change across the cluster.", "canonical_issue_reason": null, "canonical_pr_reason": "No single PR is a clear canonical duplicate target; the items cover different models, subsystems, and fixes.", "confidence": 0.91, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44770", "reason": "One is a broad config strictness fix across multiple models/tests; the other is a VideMT test expectation tweak. 
Different code paths and different failure modes.", "right": "pull_request:45287"}, {"accept": false, "left": "pull_request:44770", "reason": "The cohere_asr test patch is unrelated to the config/@strict fix; it only adjusts ASR test behavior and expectations.", "right": "pull_request:45288"}, {"accept": false, "left": "pull_request:43385", "reason": "Both add model support, but for different models with different codepaths and files; these are separate feature PRs, not the same change.", "right": "pull_request:45157"}, {"accept": false, "left": "pull_request:43707", "reason": "SLANeXt support and PrismML Bonsai support are distinct model integrations with no shared underlying bug or merged change.", "right": "pull_request:45157"}, {"accept": false, "left": "pull_request:43178", "reason": "The AMD pipeline CI fix and the AltCLIP test fix address different issues in different areas; only broad CI/test overlap exists.", "right": "pull_request:45410"}, {"accept": false, "left": "pull_request:43178", "reason": "A CI workflow change is not the same as a Nomic BERT xpu test expectation update; they are separate fixes.", "right": "pull_request:45286"}, {"accept": false, "left": "pull_request:43178", "reason": "The VideMT test-only patch is unrelated to the CI/workflow changes in 43178.", "right": "pull_request:45287"}, {"accept": false, "left": "pull_request:43178", "reason": "The Cohere ASR test fix is a model-specific test adjustment, not the same concrete issue as the CI/reporting changes.", "right": "pull_request:45288"}, {"accept": false, "left": "pull_request:43178", "reason": "The CLIPSeg test helper change is unrelated to the CI workflow fixes in 43178.", "right": "pull_request:45403"}, {"accept": false, "left": "pull_request:43936", "reason": "Moonshine streaming test/device fix and Janus processor/test changes affect different models and different code paths.", "right": "pull_request:44739"}, {"accept": false, "left": "pull_request:43498", "reason": 
"Backward-compatibility for tie_weights is unrelated to the processing_utils video fps/num_frames handling fix.", "right": "pull_request:43677"}, {"accept": false, "left": "pull_request:40820", "reason": "Adding benchmark model entries and adding NemotronH model support are separate feature additions touching different subsystems.", "right": "pull_request:44390"}, {"accept": false, "left": "pull_request:43649", "reason": "A self-comment CI/reporting workflow change is not the same as adding a new Qwen2.5-VL model implementation; no shared bug or code path.", "right": "pull_request:45298"}], "summary": "This cluster is heterogeneous: it mixes unrelated model-support PRs, test-only fixes, CI/workflow changes, and small bug fixes. The soft-similarity links look like superficial topical overlap rather than duplicate underlying changes."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44770|pull_request:45287", "pull_request:44770|pull_request:45288", "pull_request:43385|pull_request:45157", "pull_request:43707|pull_request:45157", "pull_request:43178|pull_request:45410", "pull_request:43178|pull_request:45286", "pull_request:43178|pull_request:45287", "pull_request:43178|pull_request:45288", "pull_request:43178|pull_request:45403", "pull_request:43936|pull_request:44739", "pull_request:43498|pull_request:43677", "pull_request:40820|pull_request:44390", "pull_request:43649|pull_request:45298"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 14574, "estimated_input_tokens": 7159, "item_count": 18, "node_count": 18, "serialized_chars": 28633, "soft_pair_count": 17}, "cached_at": "2026-04-14T17:39:56Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": 
"gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "4f2b0ee4ec3e86c91d28db8fbea0e81a2966771a5b715bc1d599a5cf7ecc98e7", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:40755", "pull_request:41145", "pull_request:41224", "pull_request:41722", "pull_request:42993", "pull_request:43385", "pull_request:43465", "pull_request:43627", "pull_request:43707", "pull_request:43838", "pull_request:43902", "pull_request:44053", "pull_request:44413", "pull_request:44571", "pull_request:44760", "pull_request:45139", "pull_request:45283", "pull_request:45298"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43902", "reason": "Unrelated doc cleanup PRs touching different docs/code paths; no shared bug or feature.", "right": "pull_request:45139"}, {"accept": false, "left": "pull_request:41722", "reason": "Tamil README translation vs Qwen3-ASR support; completely different changes.", "right": "pull_request:43838"}, {"accept": false, "left": "pull_request:40755", "reason": "TimesFM covariates support and Intel XPU benchmark support are separate features.", "right": "pull_request:42993"}, {"accept": false, "left": "pull_request:43465", "reason": "Both are conversion-related, but one fixes gguf loading and the other Pegasus conversion; different code paths.", "right": "pull_request:44571"}, {"accept": false, "left": "pull_request:43465", "reason": "GGUF conversion/load fix vs PEFT conversion typo; not the same underlying issue.", "right": "pull_request:44053"}, {"accept": false, "left": "pull_request:45283", "reason": "Qwen3.5 GGUF loading support vs new Qwen2.5 VL model support; unrelated model work.", "right": "pull_request:45298"}, {"accept": false, "left": "pull_request:44413", "reason": "PEFT mapping fix vs Pegasus conversion fix; different subsystems and bugs.", "right": 
"pull_request:44571"}, {"accept": false, "left": "pull_request:43465", "reason": "GGUF loading/conversion issue vs PEFT mapping typo; no concrete overlap.", "right": "pull_request:44413"}, {"accept": false, "left": "pull_request:41224", "reason": "Both add model support, but for different models; not mergeable as one PR.", "right": "pull_request:43385"}, {"accept": false, "left": "pull_request:41224", "reason": "Different model-support additions (DINOv3ViT vs SLANeXt), same broad pattern only.", "right": "pull_request:43707"}, {"accept": false, "left": "pull_request:42993", "reason": "Benchmark_v2 Intel XPU support is unrelated to UVDoc model support.", "right": "pull_request:43385"}, {"accept": false, "left": "pull_request:43385", "reason": "UVDoc model integration and Qwen3.5 GGUF loading support are unrelated.", "right": "pull_request:45283"}, {"accept": false, "left": "pull_request:41224", "reason": "Image-classification model support vs benchmark platform support; different changes.", "right": "pull_request:42993"}, {"accept": false, "left": "pull_request:43385", "reason": "Both add model support, but for different models and code paths.", "right": "pull_request:44760"}, {"accept": false, "left": "pull_request:41224", "reason": "DINOv3ViT support and Mistral4 support are separate model additions.", "right": "pull_request:44760"}, {"accept": false, "left": "pull_request:41145", "reason": "Qwen3 rope-kernel support and DINOv3ViT image-classification support are unrelated.", "right": "pull_request:41224"}, {"accept": false, "left": "pull_request:43627", "reason": "A scratch notebook and Qwen2.5 VL model support are unrelated artifacts.", "right": "pull_request:45298"}], "summary": "This cluster is heterogeneous: it mixes unrelated model-support PRs, conversion fixes, docs-only changes, benchmark support, and notebook/docs additions. 
None of the soft edges look like the same underlying bug or change, so all should be rejected."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43902|pull_request:45139", "pull_request:41722|pull_request:43838", "pull_request:40755|pull_request:42993", "pull_request:43465|pull_request:44571", "pull_request:43465|pull_request:44053", "pull_request:45283|pull_request:45298", "pull_request:44413|pull_request:44571", "pull_request:43465|pull_request:44413", "pull_request:41224|pull_request:43385", "pull_request:41224|pull_request:43707", "pull_request:42993|pull_request:43385", "pull_request:43385|pull_request:45283", "pull_request:41224|pull_request:42993", "pull_request:43385|pull_request:44760", "pull_request:41224|pull_request:44760", "pull_request:41145|pull_request:41224", "pull_request:43627|pull_request:45298"], "split": true, "trimmed": true} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 22620, "estimated_input_tokens": 11182, "item_count": 18, "node_count": 18, "serialized_chars": 44727, "soft_pair_count": 13}, "cached_at": "2026-04-14T17:40:59Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "c837b05da47e5871392af4d8991cefa3fa80ff95ec37951e6064862a69d200da", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:41061", "pull_request:41272", "pull_request:42227", "pull_request:43385", "pull_request:43527", "pull_request:43558", "pull_request:43647", "pull_request:43648", "pull_request:43649", "pull_request:43677", "pull_request:43707", "pull_request:43946", "pull_request:44055", "pull_request:44152", "pull_request:44319", "pull_request:44357", "pull_request:44395", "pull_request:44994"], 
"result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 43649 is the closest thing to a representative because it is the broadest and most central among the workflow/reporting edits, but it still does not represent the whole cluster well.", "canonical_issue_reason": null, "canonical_pr_reason": "No single canonical PR fits well; the items span different models, different bugs, and unrelated workflow/docs changes.", "confidence": 0.92, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:42227", "reason": "Different fixes in different code paths: Whisper language return vs. Beit/DPT label reduction.", "right": "pull_request:43527"}, {"accept": false, "left": "pull_request:41272", "reason": "Both add models, but for different architectures and different files; not the same change.", "right": "pull_request:43385"}, {"accept": false, "left": "pull_request:41272", "reason": "Separate model integrations for different models; only broad auto-model registration overlap.", "right": "pull_request:43707"}, {"accept": false, "left": "pull_request:43946", "reason": "Unrelated fixes: Docker image dependency changes vs. a guarded torch import in a tokenizer module.", "right": "pull_request:44055"}, {"accept": false, "left": "pull_request:41272", "reason": "Different model-add PRs for different models; no shared underlying bug or feature.", "right": "pull_request:44994"}, {"accept": false, "left": "pull_request:43647", "reason": "Both touch workflow/reporting files, but they look like separate iterations with different supporting edits, not one concrete duplicate fix.", "right": "pull_request:43649"}, {"accept": false, "left": "pull_request:43648", "reason": "Similar workflow area, but the file sets and exact edits differ enough that this is not clearly the same change.", "right": "pull_request:43649"}, {"accept": false, "left": "pull_request:43677", "reason": "Different problems: video chat-template kwargs handling vs. 
RoPE initialization.", "right": "pull_request:44357"}, {"accept": false, "left": "pull_request:43558", "reason": "A style-only test edit is unrelated to workflow/CI behavior.", "right": "pull_request:43647"}, {"accept": false, "left": "pull_request:43558", "reason": "Test style cleanup and workflow edits are not the same underlying bug or change.", "right": "pull_request:43648"}, {"accept": false, "left": "pull_request:41061", "reason": "Parakeet model support and grouped_mm autograd support are unrelated changes.", "right": "pull_request:44152"}, {"accept": false, "left": "pull_request:41061", "reason": "Different model support vs. MoE expert-behavior changes; no duplicate signal.", "right": "pull_request:44319"}, {"accept": false, "left": "pull_request:43677", "reason": "Both are small bug/security fixes, but they target different subsystems and different code paths.", "right": "pull_request:44395"}], "summary": "This cluster is mostly a grab bag of unrelated PRs: several independent model additions, a few small bug fixes, and a separate batch of GitHub Actions/reporting edits. 
I don\u2019t see a true duplicate group here."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:42227|pull_request:43527", "pull_request:41272|pull_request:43385", "pull_request:41272|pull_request:43707", "pull_request:43946|pull_request:44055", "pull_request:41272|pull_request:44994", "pull_request:43647|pull_request:43649", "pull_request:43648|pull_request:43649", "pull_request:43677|pull_request:44357", "pull_request:43558|pull_request:43647", "pull_request:43558|pull_request:43648", "pull_request:41061|pull_request:44152", "pull_request:41061|pull_request:44319", "pull_request:43677|pull_request:44395"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 16582, "estimated_input_tokens": 8163, "item_count": 18, "node_count": 18, "serialized_chars": 32652, "soft_pair_count": 22}, "cached_at": "2026-04-14T17:41:50Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "e10c1dc36dc2541f45d98e8c89befc4948277469f8cb5cf2036cea27e6ad91a0", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:40294", "pull_request:41061", "pull_request:43098", "pull_request:43247", "pull_request:43274", "pull_request:43291", "pull_request:43385", "pull_request:43498", "pull_request:43627", "pull_request:43647", "pull_request:43648", "pull_request:43707", "pull_request:43884", "pull_request:44034", "pull_request:44308", "pull_request:44994", "pull_request:45139", "pull_request:45286"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": 
"pull_request:43647", "reason": "Both touch test/CI-related files, but 43647 is a workflow tweak while 43884 injects a failing assertion into ViT tests; not the same concrete change or bug.", "right": "pull_request:43884"}, {"accept": false, "left": "pull_request:43647", "reason": "Shared ViT test filename only; one PR changes self-comment workflow logic, the other alters CLIP/ViT test code for a different purpose.", "right": "pull_request:44034"}, {"accept": false, "left": "pull_request:43648", "reason": "Both are test/workflow hacks, but they target different files and different behaviors; not one fixable code-path problem.", "right": "pull_request:43884"}, {"accept": false, "left": "pull_request:43648", "reason": "Overlap on test files is too broad; 43648 is workflow/reporting related, while 44034 modifies model tests for a separate issue.", "right": "pull_request:44034"}, {"accept": false, "left": "pull_request:41061", "reason": "Both are model-support PRs, but for different models and code paths (Tdt/Parakeet-related files vs UVDoc). 
Shared auto-registration files are boilerplate only.", "right": "pull_request:43385"}, {"accept": false, "left": "pull_request:41061", "reason": "Different model integrations entirely; same docs/auto-registry scaffolding does not indicate the same underlying change.", "right": "pull_request:43707"}, {"accept": false, "left": "pull_request:43498", "reason": "Both mention modeling_utils, but 43498 is a tie_weights backward-compatibility tweak while 45139 is a broader vLLM/rope-related compatibility fix; not the same bug.", "right": "pull_request:45139"}, {"accept": false, "left": "pull_request:40294", "reason": "Distinct model-addition PRs for Higgs Audio V2 and UVDoc; overlapping docs/auto files are generic registration edits, not a shared code-path fix.", "right": "pull_request:43385"}, {"accept": false, "left": "pull_request:40294", "reason": "Different models and implementations; no evidence they solve the same underlying issue.", "right": "pull_request:43707"}, {"accept": false, "left": "pull_request:43385", "reason": "UVDoc support and SAM3-LiteText support are unrelated model additions; shared support infrastructure is not enough to merge them.", "right": "pull_request:44308"}, {"accept": false, "left": "pull_request:43707", "reason": "SLANeXt support vs SAM3-LiteText support are separate model integrations with different files and code paths.", "right": "pull_request:44308"}, {"accept": false, "left": "pull_request:43098", "reason": "PP-DocLayoutV3 support and sarvam model addition are unrelated model ports; similar large diffs do not imply the same bug or change.", "right": "pull_request:44994"}, {"accept": false, "left": "pull_request:43247", "reason": "PP-OCRv5_mobile_det and sarvam are different model families; no shared concrete fix.", "right": "pull_request:44994"}, {"accept": false, "left": "pull_request:43274", "reason": "PP-OCRv5_server_det and sarvam address different models/code paths; not duplicates.", "right": "pull_request:44994"}, {"accept": 
false, "left": "pull_request:40294", "reason": "Both are model-support PRs with shared registration boilerplate, but they add different models and cannot plausibly be merged as one change.", "right": "pull_request:43098"}, {"accept": false, "left": "pull_request:40294", "reason": "Different model additions (Higgs Audio V2 vs PP-OCRv5_mobile_det); same infrastructure files are incidental.", "right": "pull_request:43247"}, {"accept": false, "left": "pull_request:40294", "reason": "Distinct model support PRs for different architectures; no same underlying bug/change.", "right": "pull_request:43274"}, {"accept": false, "left": "pull_request:43098", "reason": "PP-DocLayoutV3 and SAM3-LiteText are unrelated model support changes.", "right": "pull_request:44308"}, {"accept": false, "left": "pull_request:43247", "reason": "Different model support efforts with no shared concrete code-path problem.", "right": "pull_request:44308"}, {"accept": false, "left": "pull_request:43274", "reason": "PP-OCRv5_server_det and SAM3-LiteText are separate model integrations, so they should not be merged.", "right": "pull_request:44308"}, {"accept": false, "left": "pull_request:43627", "reason": "A new scratch notebook is unrelated to a model-support PR; no duplicate/change overlap.", "right": "pull_request:44308"}, {"accept": false, "left": "pull_request:43291", "reason": "Both are test-related, but one fixes Whisper tokenization/tests and the other is a Nomic BERT test autofix; different modules and issues.", "right": "pull_request:45286"}], "summary": "This cluster is mostly a set of unrelated model-addition PRs plus a few test/CI hacks; the overlaps are broad boilerplate files rather than the same underlying bug or change. 
No soft-edge pair should be merged."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43647|pull_request:43884", "pull_request:43647|pull_request:44034", "pull_request:43648|pull_request:43884", "pull_request:43648|pull_request:44034", "pull_request:41061|pull_request:43385", "pull_request:41061|pull_request:43707", "pull_request:43498|pull_request:45139", "pull_request:40294|pull_request:43385", "pull_request:40294|pull_request:43707", "pull_request:43385|pull_request:44308", "pull_request:43707|pull_request:44308", "pull_request:43098|pull_request:44994", "pull_request:43247|pull_request:44994", "pull_request:43274|pull_request:44994", "pull_request:40294|pull_request:43098", "pull_request:40294|pull_request:43247", "pull_request:40294|pull_request:43274", "pull_request:43098|pull_request:44308", "pull_request:43247|pull_request:44308", "pull_request:43274|pull_request:44308", "pull_request:43627|pull_request:44308", "pull_request:43291|pull_request:45286"], "split": true, "trimmed": true} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 16046, "estimated_input_tokens": 7895, "item_count": 18, "node_count": 18, "serialized_chars": 31578, "soft_pair_count": 23}, "cached_at": "2026-04-14T17:42:32Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "2aab2c85cb1257e6495c12ae656d1d86c3f5d668d4f883099a5459f809b72992", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:36895", "pull_request:39455", "pull_request:42772", "pull_request:42978", "pull_request:43291", "pull_request:43385", "pull_request:43448", "pull_request:43451", "pull_request:43665", "pull_request:43858", "pull_request:44539", 
"pull_request:44815", "pull_request:44994", "pull_request:45286", "pull_request:45287", "pull_request:45288", "pull_request:45403", "pull_request:45410"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "No PR is a good global representative because the apparent similarity is only at the broad subsystem level (docs/auto registration/tests), not a shared concrete bug or feature.", "canonical_issue_reason": null, "canonical_pr_reason": "No single canonical PR fits the cluster: the items span distinct model additions (e.g. Molmo, Molmo2, UVDoc, EuroBERT, RF-DETR) and unrelated test/fix work, so there is no one underlying change to anchor on.", "confidence": 0.95, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43291", "reason": "Different fixes in different models: Whisper tokenization vs VideoMT tests; not the same bug.", "right": "pull_request:45287"}, {"accept": false, "left": "pull_request:43291", "reason": "Whisper tokenizer change and Cohere ASR test cleanup are unrelated.", "right": "pull_request:45288"}, {"accept": false, "left": "pull_request:43448", "reason": "Molmo and Molmo2 are separate model support PRs, not one merged change.", "right": "pull_request:43451"}, {"accept": false, "left": "pull_request:39455", "reason": "EuroBERT support and ViT NEPA support are distinct model additions.", "right": "pull_request:42978"}, {"accept": false, "left": "pull_request:42978", "reason": "ViT NEPA and Molmo are unrelated model support additions.", "right": "pull_request:43448"}, {"accept": false, "left": "pull_request:42978", "reason": "ViT NEPA and Molmo2 are separate features, not one code-path fix.", "right": "pull_request:43451"}, {"accept": false, "left": "pull_request:36895", "reason": "RF-DETR support and EuroBERT support are unrelated model-addition PRs.", "right": "pull_request:39455"}, {"accept": false, "left": "pull_request:43448", "reason": "Molmo support and A.X K1 support are distinct model integrations.", 
"right": "pull_request:44539"}, {"accept": false, "left": "pull_request:43451", "reason": "Molmo2 support and A.X K1 support are unrelated.", "right": "pull_request:44539"}, {"accept": false, "left": "pull_request:43448", "reason": "Molmo support and sarvam model support are different additions.", "right": "pull_request:44994"}, {"accept": false, "left": "pull_request:43451", "reason": "Molmo2 support and sarvam model support do not describe the same change.", "right": "pull_request:44994"}, {"accept": false, "left": "pull_request:42978", "reason": "ViT NEPA support and sarvam model support are unrelated model PRs.", "right": "pull_request:44994"}, {"accept": false, "left": "pull_request:43448", "reason": "Molmo support and GlmMoeDsa support are separate model-addition changes.", "right": "pull_request:43858"}, {"accept": false, "left": "pull_request:43451", "reason": "Molmo2 and GlmMoeDsa are unrelated model integrations.", "right": "pull_request:43858"}, {"accept": false, "left": "pull_request:43385", "reason": "UVDoc and Molmo are different model support PRs.", "right": "pull_request:43448"}, {"accept": false, "left": "pull_request:43385", "reason": "UVDoc and Molmo2 are unrelated additions.", "right": "pull_request:43451"}, {"accept": false, "left": "pull_request:43665", "reason": "CLIP/ViT test tweaks and AltCLIP test fixes are different test targets.", "right": "pull_request:45410"}, {"accept": false, "left": "pull_request:44815", "reason": "Mistral dequant fix and AltCLIP test fix are unrelated code paths.", "right": "pull_request:45410"}, {"accept": false, "left": "pull_request:43665", "reason": "Different model test files; these are separate failing-test fixes.", "right": "pull_request:45286"}, {"accept": false, "left": "pull_request:43665", "reason": "CLIP/ViT test changes and VideoMT test changes are not the same fix.", "right": "pull_request:45287"}, {"accept": false, "left": "pull_request:43665", "reason": "CLIP/ViT test changes and Cohere ASR test 
changes are unrelated.", "right": "pull_request:45288"}, {"accept": false, "left": "pull_request:43665", "reason": "Model-specific CLIP/ViT test updates and a common test helper change are not the same concrete bug.", "right": "pull_request:45403"}, {"accept": false, "left": "pull_request:42772", "reason": "Whisper tokenization behavior and CLIP/ViT test fixes are unrelated.", "right": "pull_request:43665"}], "summary": "This cluster is noisy and mostly contains unrelated PRs: several independent model-addition PRs plus a few isolated test/fix PRs. The soft edges are driven by shared infra files or broad auto-generated patterns, not the same underlying change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43291|pull_request:45287", "pull_request:43291|pull_request:45288", "pull_request:43448|pull_request:43451", "pull_request:39455|pull_request:42978", "pull_request:42978|pull_request:43448", "pull_request:42978|pull_request:43451", "pull_request:36895|pull_request:39455", "pull_request:43448|pull_request:44539", "pull_request:43451|pull_request:44539", "pull_request:43448|pull_request:44994", "pull_request:43451|pull_request:44994", "pull_request:42978|pull_request:44994", "pull_request:43448|pull_request:43858", "pull_request:43451|pull_request:43858", "pull_request:43385|pull_request:43448", "pull_request:43385|pull_request:43451", "pull_request:43665|pull_request:45410", "pull_request:44815|pull_request:45410", "pull_request:43665|pull_request:45286", "pull_request:43665|pull_request:45287", "pull_request:43665|pull_request:45288", "pull_request:43665|pull_request:45403", "pull_request:42772|pull_request:43665"], "split": true, "trimmed": true} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 20882, "estimated_input_tokens": 10313, "item_count": 18, "node_count": 18, "serialized_chars": 41251, "soft_pair_count": 24}, "cached_at": 
"2026-04-14T17:43:29Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "99a1f8e8c51caa36320a4c8098270863fe404d1f6d9bd8ef184ca861e3008e08", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:42227", "pull_request:42772", "pull_request:43291", "pull_request:43465", "pull_request:43544", "pull_request:43665", "pull_request:43669", "pull_request:43677", "pull_request:43851", "pull_request:43926", "pull_request:44053", "pull_request:44055", "pull_request:44564", "pull_request:44571", "pull_request:44815", "pull_request:44876", "pull_request:45016", "pull_request:45139"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 43465 is the most suitable cluster representative: merged, issue-linked, and a real code-path fix. 
The rest of the items are either unrelated or too generic to serve as a canonical anchor.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 43465 is the best representative because it is a merged, explicit-issue-linked, concrete code fix with clear scope and stronger substance than the many unrelated or test/doc-only PRs.", "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43665", "reason": "Different changes: CLIP/ViT test edits vs ModernBERT norm_eps dtype fix.", "right": "pull_request:43669"}, {"accept": false, "left": "pull_request:43665", "reason": "Unrelated areas: test failures in CLIP/ViT vs GLM MoE DSA attention logic.", "right": "pull_request:44564"}, {"accept": false, "left": "pull_request:43665", "reason": "Different bug classes: test insertions vs Pegasus conversion/dependency changes.", "right": "pull_request:44571"}, {"accept": false, "left": "pull_request:43665", "reason": "Test-only edits vs Nemotron-H modular/model cleanup; not the same code-path problem.", "right": "pull_request:44876"}, {"accept": false, "left": "pull_request:42227", "reason": "Whisper return_language fix is unrelated to CLIP/ViT test placeholders.", "right": "pull_request:43665"}, {"accept": false, "left": "pull_request:43544", "reason": "Docstring typo fix is unrelated to test-only failures.", "right": "pull_request:43665"}, {"accept": false, "left": "pull_request:43665", "reason": "PEFT weight-mapping typo fix is a different subsystem and bug than CLIP/ViT tests.", "right": "pull_request:44053"}, {"accept": false, "left": "pull_request:43665", "reason": "Torch import guard in VibeVoice is unrelated to CLIP/ViT test edits.", "right": "pull_request:44055"}, {"accept": false, "left": "pull_request:43665", "reason": "GLM5 inference bug fix is unrelated to the CLIP/ViT test modifications.", "right": "pull_request:45016"}, {"accept": false, "left": "pull_request:43465", "reason": "GGUF recent conversion/model-loading fix is unrelated to 
CLIP/ViT test changes.", "right": "pull_request:43665"}, {"accept": false, "left": "pull_request:43665", "reason": "Video mm_load kwargs handling in processing_utils is unrelated to test scaffolding changes.", "right": "pull_request:43677"}, {"accept": false, "left": "pull_request:43665", "reason": "Slack workflow install step is unrelated to CLIP/ViT test edits.", "right": "pull_request:43851"}, {"accept": false, "left": "pull_request:43665", "reason": "Deepspeed weight conversion fix is a different subsystem and code path.", "right": "pull_request:43926"}, {"accept": false, "left": "pull_request:43665", "reason": "RoPE/vLLM-related changes are unrelated to CLIP/ViT test placeholders.", "right": "pull_request:45139"}, {"accept": false, "left": "pull_request:42772", "reason": "Tokenization-auto backend selection and FP8 dequant fixes are unrelated changes.", "right": "pull_request:44815"}, {"accept": false, "left": "pull_request:43669", "reason": "ModernBERT norm_eps dtype fix is unrelated to FP8 dequant logic.", "right": "pull_request:44815"}, {"accept": false, "left": "pull_request:44564", "reason": "GLM DSA attention/rotary changes are unrelated to fine-grained FP8 dequant fixes.", "right": "pull_request:44815"}, {"accept": false, "left": "pull_request:44571", "reason": "Pegasus conversion/dependency changes do not match the FP8 dequant bug.", "right": "pull_request:44815"}, {"accept": false, "left": "pull_request:44815", "reason": "Fine-grained FP8 dequant work and Nemotron-H modular cleanup are not the same fix.", "right": "pull_request:44876"}, {"accept": false, "left": "pull_request:42227", "reason": "Whisper return_language bug is unrelated to FP8 dequant behavior.", "right": "pull_request:44815"}, {"accept": false, "left": "pull_request:43544", "reason": "Docstring typo cleanup is unrelated to dequantization/kernel fixes.", "right": "pull_request:44815"}, {"accept": false, "left": "pull_request:44055", "reason": "Unprotected torch import guard is unrelated 
to FP8 dequant logic.", "right": "pull_request:44815"}, {"accept": false, "left": "pull_request:44815", "reason": "GLM5 inference bug fix is a different model-path issue than FP8 dequant padding/conversion.", "right": "pull_request:45016"}, {"accept": false, "left": "pull_request:43291", "reason": "Whisper test fixes are unrelated to the FP8 dequant change.", "right": "pull_request:44815"}], "summary": "This cluster is heterogeneous: it mixes unrelated PRs across Whisper, CLIP/ViT tests, GGUF/model loading, PEFT/Deepspeed, docs, workflows, and several model-specific fixes. The soft edges look like superficial title similarity only; none appear to be the same underlying bug or change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43665|pull_request:43669", "pull_request:43665|pull_request:44564", "pull_request:43665|pull_request:44571", "pull_request:43665|pull_request:44876", "pull_request:42227|pull_request:43665", "pull_request:43544|pull_request:43665", "pull_request:43665|pull_request:44053", "pull_request:43665|pull_request:44055", "pull_request:43665|pull_request:45016", "pull_request:43465|pull_request:43665", "pull_request:43665|pull_request:43677", "pull_request:43665|pull_request:43851", "pull_request:43665|pull_request:43926", "pull_request:43665|pull_request:45139", "pull_request:42772|pull_request:44815", "pull_request:43669|pull_request:44815", "pull_request:44564|pull_request:44815", "pull_request:44571|pull_request:44815", "pull_request:44815|pull_request:44876", "pull_request:42227|pull_request:44815", "pull_request:43544|pull_request:44815", "pull_request:44055|pull_request:44815", "pull_request:44815|pull_request:45016", "pull_request:43291|pull_request:44815"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 20280, "estimated_input_tokens": 10012, "item_count": 17, "node_count": 17, 
"serialized_chars": 40046, "soft_pair_count": 22}, "cached_at": "2026-04-14T17:44:19Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "df5b8ce151fc2ea6426ad26b6881499f960ef05c2b0e8dea888f39275ba38cba", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:42772", "pull_request:43291", "pull_request:43453", "pull_request:43669", "pull_request:44343", "pull_request:44352", "pull_request:44364", "pull_request:44388", "pull_request:44398", "pull_request:44429", "pull_request:44456", "pull_request:44470", "pull_request:44515", "pull_request:44519", "pull_request:44564", "pull_request:44690", "pull_request:44876"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 44564 best captures the dominant underlying bug fix in the cluster and is the most merge-ready artifact; the remaining items are either alternate iterations of that fix or unrelated work.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 44564 is the cleanest representative of the largest coherent change: it is merged, explicitly targets the same issue, touches the same two GLM-MOE-DSA files, and the other GLM-MOE-DSA PRs are close revisions of the same fix.", "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:42772", "reason": "Different subsystems and different problems: tokenization auto-backend selection vs Whisper decode/test cleanup.", "right": "pull_request:43291"}, {"accept": false, "left": "pull_request:42772", "reason": "Unrelated changes in tokenization auto vs ModernBERT config typing.", "right": "pull_request:43669"}, {"accept": false, "left": "pull_request:42772", "reason": "Completely different code paths and bug types; no shared underlying change.", "right": 
"pull_request:44564"}, {"accept": false, "left": "pull_request:42772", "reason": "Tokenization backend selection is unrelated to Nemotron-H modular/model typing cleanup.", "right": "pull_request:44876"}, {"accept": false, "left": "pull_request:43291", "reason": "Whisper test/decode fix is unrelated to ModernBERT norm_eps dtype fix.", "right": "pull_request:43669"}, {"accept": false, "left": "pull_request:43291", "reason": "Whisper test/decode behavior and GLM-MOE-DSA scoring are unrelated bugs.", "right": "pull_request:44564"}, {"accept": false, "left": "pull_request:43291", "reason": "Different models and unrelated fixes.", "right": "pull_request:44876"}, {"accept": false, "left": "pull_request:43669", "reason": "ModernBERT dtype/config fix is unrelated to GLM-MOE-DSA scoring logic.", "right": "pull_request:44564"}, {"accept": false, "left": "pull_request:43669", "reason": "Separate model families and separate issues.", "right": "pull_request:44876"}, {"accept": true, "left": "pull_request:44364", "reason": "Same GLM-MOE-DSA scoring bug: same files, same issue target, same missing ReLU correction.", "right": "pull_request:44690"}, {"accept": true, "left": "pull_request:44398", "reason": "Same underlying GLM-MOE-DSA ReLU fix with the same code path and issue target.", "right": "pull_request:44690"}, {"accept": true, "left": "pull_request:44352", "reason": "Both are the same loading_report non-TTY ANSI suppression fix in the same file.", "right": "pull_request:44388"}, {"accept": true, "left": "pull_request:44343", "reason": "Same loading_report ANSI escape handling bug; one is a variant of the other.", "right": "pull_request:44352"}, {"accept": true, "left": "pull_request:44343", "reason": "Same non-TTY loading_report ANSI/style fix, just implemented with a different helper shape.", "right": "pull_request:44429"}, {"accept": true, "left": "pull_request:44352", "reason": "Same loading_report terminal-style suppression change; same underlying bug.", "right": 
"pull_request:44429"}, {"accept": true, "left": "pull_request:44364", "reason": "Same GLM-MOE-DSA missing-ReLU fix in the same files and issue.", "right": "pull_request:44398"}, {"accept": true, "left": "pull_request:44343", "reason": "Same loading_report ANSI/TTY bug and same target file.", "right": "pull_request:44388"}, {"accept": true, "left": "pull_request:44364", "reason": "Same GLM-MOE-DSA scoring correction; later PR is another revision of the same patch.", "right": "pull_request:44470"}, {"accept": true, "left": "pull_request:44398", "reason": "Same concrete ReLU insertion in the GLM-MOE-DSA indexer.", "right": "pull_request:44470"}, {"accept": false, "left": "pull_request:43453", "reason": "44253 is a broader pad_token_id attribute/config fix across models, while 44519 only adjusts a Marian integration test.", "right": "pull_request:44519"}, {"accept": false, "left": "pull_request:43453", "reason": "Pad token attribute/config bug fix is unrelated to DepthPro test dtype adjustment.", "right": "pull_request:44456"}, {"accept": false, "left": "pull_request:43453", "reason": "Different failure modes: missing pad_token_id attribute vs GPT-Neo generation test timeout/length adjustment.", "right": "pull_request:44515"}], "summary": "This cluster is not one duplicate set; it contains three unrelated subgroups: a GLM-MOE-DSA ReLU fix family, a loading_report ANSI/TTY fix family, and several isolated test/config tweaks. 
The strongest representative is the merged GLM-MOE-DSA ReLU PR."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:42772|pull_request:43291", "pull_request:42772|pull_request:43669", "pull_request:42772|pull_request:44564", "pull_request:42772|pull_request:44876", "pull_request:43291|pull_request:43669", "pull_request:43291|pull_request:44564", "pull_request:43291|pull_request:44876", "pull_request:43669|pull_request:44564", "pull_request:43669|pull_request:44876", "pull_request:44364|pull_request:44690", "pull_request:44398|pull_request:44690", "pull_request:44352|pull_request:44388", "pull_request:44343|pull_request:44352", "pull_request:44343|pull_request:44429", "pull_request:44352|pull_request:44429", "pull_request:44364|pull_request:44398", "pull_request:44343|pull_request:44388", "pull_request:44364|pull_request:44470", "pull_request:44398|pull_request:44470", "pull_request:43453|pull_request:44519", "pull_request:43453|pull_request:44456", "pull_request:43453|pull_request:44515"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 11186, "estimated_input_tokens": 5465, "item_count": 8, "node_count": 8, "serialized_chars": 21858, "soft_pair_count": 24}, "cached_at": "2026-04-14T17:45:09Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "276251e8fd4779509493cf9b011f27c16bc48be4d03d7103845d22cb91899e6a", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44191", "pull_request:44192", "pull_request:44193", "pull_request:44194", "pull_request:44195", "pull_request:44196", "pull_request:44197", "pull_request:44198"], "result": {"analyst_result": {"best_issue_reason": 
null, "best_pr_reason": "If a single representative is needed, 44196 is the clearest core-library bug fix: a minimal, targeted change in configuration loading with direct user impact.", "canonical_issue_reason": null, "canonical_pr_reason": "No canonical PR: these PRs span unrelated subsystems (config loading, tokenizer conversion, optimizer args, model output behavior, video processor lookup, AudioFlamingo inference).", "confidence": 0.98, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44196", "reason": "Different code paths and bugs: config loading vs generation-config save validation.", "right": "pull_request:44197"}, {"accept": false, "left": "pull_request:44196", "reason": "Different subsystems: configuration loading vs tokenizer conversion/post-processor handling.", "right": "pull_request:44198"}, {"accept": false, "left": "pull_request:44192", "reason": "ModelOutput attribute assignment bug is unrelated to config loading behavior.", "right": "pull_request:44196"}, {"accept": false, "left": "pull_request:44192", "reason": "Different bugs in different files: ModelOutput assignment vs tokenizer conversion.", "right": "pull_request:44198"}, {"accept": false, "left": "pull_request:44195", "reason": "AddedToken construction fix is unrelated to GenerationConfig save-time validation.", "right": "pull_request:44197"}, {"accept": false, "left": "pull_request:44194", "reason": "Optimizer SGD argument coercion is a different problem from tokenizer loading.", "right": "pull_request:44198"}, {"accept": false, "left": "pull_request:44192", "reason": "No shared underlying bug; ModelOutput assignment and optimizer arg parsing are unrelated.", "right": "pull_request:44194"}, {"accept": false, "left": "pull_request:44194", "reason": "Optimizer argument handling and config loading failure are separate fixes.", "right": "pull_request:44196"}, {"accept": false, "left": "pull_request:44193", "reason": "Video auto processor lookup crash is unrelated to config 
attribute loading.", "right": "pull_request:44196"}, {"accept": false, "left": "pull_request:44193", "reason": "Different bugs in different components: video auto lookup vs AddedToken handling.", "right": "pull_request:44195"}, {"accept": false, "left": "pull_request:44197", "reason": "GenerationConfig validation/save logic is not the same as tokenizer conversion.", "right": "pull_request:44198"}, {"accept": false, "left": "pull_request:44195", "reason": "AddedToken special-flag bug and tokenizer post-processing bug are unrelated.", "right": "pull_request:44198"}, {"accept": false, "left": "pull_request:44191", "reason": "AudioFlamingo batched inference fix is unrelated to optimizer SGD argument parsing.", "right": "pull_request:44194"}, {"accept": false, "left": "pull_request:44193", "reason": "Video auto processor crash and GenerationConfig validation are different code paths.", "right": "pull_request:44197"}, {"accept": false, "left": "pull_request:44194", "reason": "Optimizer kwargs coercion and AddedToken construction are unrelated changes.", "right": "pull_request:44195"}, {"accept": false, "left": "pull_request:44191", "reason": "AudioFlamingo inference bug is not the same as GenerationConfig save validation.", "right": "pull_request:44197"}, {"accept": false, "left": "pull_request:44191", "reason": "AudioFlamingo batched inference and config loading are separate bugs.", "right": "pull_request:44196"}, {"accept": false, "left": "pull_request:44195", "reason": "AddedToken duplicate-key fix does not match the config loading issue.", "right": "pull_request:44196"}, {"accept": false, "left": "pull_request:44191", "reason": "Different underlying changes: AudioFlamingo batching vs AddedToken parsing.", "right": "pull_request:44195"}, {"accept": false, "left": "pull_request:44192", "reason": "ModelOutput key assignment bug is unrelated to AddedToken handling.", "right": "pull_request:44195"}, {"accept": false, "left": "pull_request:44191", "reason": "AudioFlamingo 
batched inference and ModelOutput assignment are unrelated code paths.", "right": "pull_request:44192"}, {"accept": false, "left": "pull_request:44193", "reason": "Video processor lookup crash and SGD optimizer args are unrelated.", "right": "pull_request:44194"}, {"accept": false, "left": "pull_request:44192", "reason": "Different components and failures: ModelOutput assignment vs video auto-processing lookup.", "right": "pull_request:44193"}], "summary": "No duplicate cluster here: all items are separate pull requests fixing different bugs in different code paths. The soft similarity is mostly from the shared \"Fix #...\" title pattern, not the same underlying change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44196|pull_request:44197", "pull_request:44196|pull_request:44198", "pull_request:44192|pull_request:44196", "pull_request:44192|pull_request:44198", "pull_request:44195|pull_request:44197", "pull_request:44194|pull_request:44198", "pull_request:44192|pull_request:44194", "pull_request:44194|pull_request:44196", "pull_request:44193|pull_request:44196", "pull_request:44193|pull_request:44195", "pull_request:44197|pull_request:44198", "pull_request:44195|pull_request:44198", "pull_request:44191|pull_request:44194", "pull_request:44193|pull_request:44197", "pull_request:44194|pull_request:44195", "pull_request:44191|pull_request:44197", "pull_request:44191|pull_request:44196", "pull_request:44195|pull_request:44196", "pull_request:44194|pull_request:44197", "pull_request:44191|pull_request:44195", "pull_request:44192|pull_request:44195", "pull_request:44191|pull_request:44192", "pull_request:44193|pull_request:44194", "pull_request:44192|pull_request:44193"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 17524, "estimated_input_tokens": 8634, "item_count": 15, "node_count": 15, "serialized_chars": 34534, 
"soft_pair_count": 24}, "cached_at": "2026-04-14T17:45:51Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "fb8a8c9b44774f8961a5400079b5edecfb5bc0b44ffed9630b92879ee417ccba", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43998", "pull_request:44000", "pull_request:44001", "pull_request:44002", "pull_request:44004", "pull_request:44025", "pull_request:44026", "pull_request:44027", "pull_request:44028", "pull_request:44029", "pull_request:44030", "pull_request:44456", "pull_request:44515", "pull_request:44519", "pull_request:44934"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "44025 is the best standalone representative of the cluster\u2019s refactor series: it applies the new tracing decorator to two Depth Anything implementations and cleanly demonstrates the code-path change.", "canonical_issue_reason": null, "canonical_pr_reason": "No true duplicate PR here; the closest representative is 44025 because it is a clear, self-contained example of the shared output-tracing refactor pattern and touches two related model files.", "confidence": 0.95, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44456", "reason": "Both are test-fix PRs, but they target different failures in different model families and different code paths; not the same bug.", "right": "pull_request:44934"}, {"accept": false, "left": "pull_request:44515", "reason": "These fix different integration tests with unrelated generation behavior changes, so they are not duplicates.", "right": "pull_request:44934"}, {"accept": false, "left": "pull_request:44519", "reason": "Marian config assertion changes and Qwen/T5 test adjustments are unrelated fixes; do not merge.", "right": "pull_request:44934"}, 
{"accept": false, "left": "pull_request:43998", "reason": "Both are output-tracing refactors, but they touch different model implementations and different forward paths.", "right": "pull_request:44002"}, {"accept": false, "left": "pull_request:43998", "reason": "Different models and different forward signatures; same refactor theme is too broad to be a duplicate.", "right": "pull_request:44000"}, {"accept": false, "left": "pull_request:44026", "reason": "Vision/encoder-decoder and RWKV are unrelated code paths; not the same bug or change.", "right": "pull_request:44029"}, {"accept": false, "left": "pull_request:43998", "reason": "Separate model-specific refactors in different files; they are not mergeable as one duplicate fix.", "right": "pull_request:44001"}, {"accept": false, "left": "pull_request:44029", "reason": "RWKV and DPR have different implementations and output contracts, so these are distinct changes.", "right": "pull_request:44030"}, {"accept": false, "left": "pull_request:44025", "reason": "Both add can_return_tuple, but for different model families and different forward methods.", "right": "pull_request:44026"}, {"accept": false, "left": "pull_request:44025", "reason": "Depth Anything and Speech Encoder-Decoder are unrelated model paths; same refactor label is not enough.", "right": "pull_request:44027"}, {"accept": false, "left": "pull_request:44025", "reason": "Depth Anything and SuperPoint are distinct model implementations with different output handling.", "right": "pull_request:44028"}, {"accept": false, "left": "pull_request:44026", "reason": "Different encoder-decoder wrappers and different call chains; not the same underlying fix.", "right": "pull_request:44027"}, {"accept": false, "left": "pull_request:44026", "reason": "These refactor different models and forward contracts, so they are not duplicates.", "right": "pull_request:44028"}, {"accept": false, "left": "pull_request:44027", "reason": "Speech encoder-decoder and SuperPoint are 
unrelated; no shared concrete bug.", "right": "pull_request:44028"}, {"accept": false, "left": "pull_request:44001", "reason": "UnivNet and UperNet are different architectures; same output-tracing refactor pattern does not make them duplicates.", "right": "pull_request:44002"}, {"accept": false, "left": "pull_request:44000", "reason": "Different model code paths and signatures; these are separate refactors.", "right": "pull_request:44001"}, {"accept": false, "left": "pull_request:44000", "reason": "Vision-text dual encoder and UperNet do not share the same concrete change.", "right": "pull_request:44002"}, {"accept": false, "left": "pull_request:43998", "reason": "Timm backbone and RWKV are unrelated implementations; no duplicate bug or change.", "right": "pull_request:44029"}, {"accept": false, "left": "pull_request:44002", "reason": "UperNet and RWKV differ in architecture and forward behavior, so they should stay separate.", "right": "pull_request:44029"}, {"accept": false, "left": "pull_request:43998", "reason": "Timm backbone refactor and CodeGen output-capturing changes are different code paths and not mergeable as one PR.", "right": "pull_request:44004"}, {"accept": false, "left": "pull_request:44001", "reason": "UnivNet and CodeGen are separate model implementations; same pattern is too broad for duplicate triage.", "right": "pull_request:44004"}, {"accept": false, "left": "pull_request:44002", "reason": "UperNet and CodeGen touch different forward logic and output handling.", "right": "pull_request:44004"}, {"accept": false, "left": "pull_request:44025", "reason": "Depth Anything and RWKV are unrelated model families; no shared concrete bug.", "right": "pull_request:44029"}, {"accept": false, "left": "pull_request:44025", "reason": "Depth Anything and DPR are different model implementations with different output semantics.", "right": "pull_request:44030"}], "summary": "This cluster is mostly a set of similarly named but separate pull requests for different 
models, plus a few unrelated test-only fixes. They share a broad theme of refactoring output tracing or stabilizing tests, but they are not the same underlying change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44456|pull_request:44934", "pull_request:44515|pull_request:44934", "pull_request:44519|pull_request:44934", "pull_request:43998|pull_request:44002", "pull_request:43998|pull_request:44000", "pull_request:44026|pull_request:44029", "pull_request:43998|pull_request:44001", "pull_request:44029|pull_request:44030", "pull_request:44025|pull_request:44026", "pull_request:44025|pull_request:44027", "pull_request:44025|pull_request:44028", "pull_request:44026|pull_request:44027", "pull_request:44026|pull_request:44028", "pull_request:44027|pull_request:44028", "pull_request:44001|pull_request:44002", "pull_request:44000|pull_request:44001", "pull_request:44000|pull_request:44002", "pull_request:43998|pull_request:44029", "pull_request:44002|pull_request:44029", "pull_request:43998|pull_request:44004", "pull_request:44001|pull_request:44004", "pull_request:44002|pull_request:44004", "pull_request:44025|pull_request:44029", "pull_request:44025|pull_request:44030"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 20052, "estimated_input_tokens": 9898, "item_count": 18, "node_count": 18, "serialized_chars": 39591, "soft_pair_count": 22}, "cached_at": "2026-04-14T17:46:42Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "dd95672d0151dad72ac5ce2e4342e613bc86d9bb3106e76f25747989f8655626", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43324", 
"pull_request:43339", "pull_request:44000", "pull_request:44001", "pull_request:44002", "pull_request:44025", "pull_request:44026", "pull_request:44027", "pull_request:44028", "pull_request:44029", "pull_request:44030", "pull_request:44439", "pull_request:44456", "pull_request:44515", "pull_request:44519", "pull_request:44934", "pull_request:45048", "pull_request:45214"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.98, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44026", "reason": "Different models and code paths; only the broad output-tracing refactor theme matches.", "right": "pull_request:44030"}, {"accept": false, "left": "pull_request:44027", "reason": "Different model implementations; not the same bug or change.", "right": "pull_request:44029"}, {"accept": false, "left": "pull_request:44027", "reason": "Separate seq2seq wrapper refactors for different architectures.", "right": "pull_request:44030"}, {"accept": false, "left": "pull_request:44028", "reason": "Unrelated models; similar refactor wording is not enough.", "right": "pull_request:44029"}, {"accept": false, "left": "pull_request:44028", "reason": "Different model families and implementation details.", "right": "pull_request:44030"}, {"accept": false, "left": "pull_request:44456", "reason": "Both fix failing tests, but for different models and failures.", "right": "pull_request:44515"}, {"accept": false, "left": "pull_request:44456", "reason": "Different integration test failures in different model families.", "right": "pull_request:44519"}, {"accept": false, "left": "pull_request:44515", "reason": "Same test-fix pattern, but unrelated underlying failure.", "right": "pull_request:44519"}, {"accept": false, "left": "pull_request:44439", "reason": "Different model integration tests and distinct failure modes.", "right": "pull_request:44456"}, {"accept": false, "left": 
"pull_request:44439", "reason": "Separate model-specific generation/integration fixes.", "right": "pull_request:44515"}, {"accept": false, "left": "pull_request:44439", "reason": "Not the same underlying bug; only generic test-fix similarity.", "right": "pull_request:44519"}, {"accept": false, "left": "pull_request:44934", "reason": "Different models and different expected outputs; no shared bug.", "right": "pull_request:45048"}, {"accept": false, "left": "pull_request:44001", "reason": "Both are output-tracing refactors, but in different models.", "right": "pull_request:44025"}, {"accept": false, "left": "pull_request:44001", "reason": "Different model code paths; same refactor theme only.", "right": "pull_request:44026"}, {"accept": false, "left": "pull_request:44002", "reason": "Separate model-specific tracing refactors.", "right": "pull_request:44025"}, {"accept": false, "left": "pull_request:44000", "reason": "Different architectures; not a mergeable single fix.", "right": "pull_request:44025"}, {"accept": false, "left": "pull_request:44000", "reason": "Unrelated model implementations despite similar refactor language.", "right": "pull_request:44026"}, {"accept": false, "left": "pull_request:44456", "reason": "Distinct integration test failures for different models.", "right": "pull_request:45048"}, {"accept": false, "left": "pull_request:44515", "reason": "Different generation tests and model families.", "right": "pull_request:45048"}, {"accept": false, "left": "pull_request:44519", "reason": "Separate failing tests; no common code-path bug.", "right": "pull_request:45048"}, {"accept": false, "left": "pull_request:43324", "reason": "Both are device-specific test expectation updates, but for different models.", "right": "pull_request:43339"}, {"accept": false, "left": "pull_request:43324", "reason": "Completely different model areas and fixes; no underlying duplication.", "right": "pull_request:45214"}], "summary": "All proposed links are superficial 
similarity matches, not true duplicates: they span different models, different test cases, or unrelated refactors with the same broad wording. No soft edge should be merged."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44026|pull_request:44030", "pull_request:44027|pull_request:44029", "pull_request:44027|pull_request:44030", "pull_request:44028|pull_request:44029", "pull_request:44028|pull_request:44030", "pull_request:44456|pull_request:44515", "pull_request:44456|pull_request:44519", "pull_request:44515|pull_request:44519", "pull_request:44439|pull_request:44456", "pull_request:44439|pull_request:44515", "pull_request:44439|pull_request:44519", "pull_request:44934|pull_request:45048", "pull_request:44001|pull_request:44025", "pull_request:44001|pull_request:44026", "pull_request:44002|pull_request:44025", "pull_request:44000|pull_request:44025", "pull_request:44000|pull_request:44026", "pull_request:44456|pull_request:45048", "pull_request:44515|pull_request:45048", "pull_request:44519|pull_request:45048", "pull_request:43324|pull_request:43339", "pull_request:43324|pull_request:45214"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 17402, "estimated_input_tokens": 8573, "item_count": 18, "node_count": 18, "serialized_chars": 34291, "soft_pair_count": 14}, "cached_at": "2026-04-14T17:47:19Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "fa9ba127df31971e8f23aba1057fdaf03111bdd32a019cd100347dc36ed4374f", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43324", "pull_request:43339", "pull_request:43464", "pull_request:43488", "pull_request:43500", 
"pull_request:43563", "pull_request:43615", "pull_request:43938", "pull_request:44179", "pull_request:44235", "pull_request:44456", "pull_request:44490", "pull_request:44515", "pull_request:44519", "pull_request:44527", "pull_request:44566", "pull_request:45190", "pull_request:45214"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 45214 is the best representative PR because it addresses a concrete code-path bug with implementation and test coverage, unlike the mostly test-only or formatting/typing PRs in this set.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 45214 is the strongest standalone fix here: it changes the model implementation in both `modeling_cohere_asr.py` and `modular_cohere_asr.py` and adds a targeted test. The rest are mostly isolated test-data or cleanup PRs, not a single shared change.", "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44179", "reason": "Same file, but different model entries (`ministral` vs `fuyu`) and different fixes. Related subsystem, not the same change.", "right": "pull_request:44235"}, {"accept": false, "left": "pull_request:44566", "reason": "Both are typing-related CLI cleanup, but they touch different typing issues and spans of code. Not one concrete duplicate fix.", "right": "pull_request:45190"}, {"accept": false, "left": "pull_request:43324", "reason": "Different model test files and different expected values for different models; no shared bug or change.", "right": "pull_request:43938"}, {"accept": false, "left": "pull_request:43339", "reason": "One is an XPU test expectation update for `lw_detr`; the other is a model-parallel runtime fix for `cohere_asr`. 
Different problems.", "right": "pull_request:45214"}, {"accept": false, "left": "pull_request:43488", "reason": "Both modify `test_modeling_vit.py`, but they are separate bot-check/style probes with different edits and intent.", "right": "pull_request:43563"}, {"accept": false, "left": "pull_request:43324", "reason": "Different models and different test expectation updates; not the same underlying issue.", "right": "pull_request:43615"}, {"accept": false, "left": "pull_request:43488", "reason": "Both are unrelated repo-bot/style probes in the same test file, but they are distinct changes.", "right": "pull_request:43500"}, {"accept": false, "left": "pull_request:43938", "reason": "Different models and different failure modes; one is a test expectation refresh, the other a parallelism code fix.", "right": "pull_request:45214"}, {"accept": false, "left": "pull_request:44519", "reason": "Marian vs MusicGen integration tests; both are dtype/test fixes, but for unrelated models and failures.", "right": "pull_request:44527"}, {"accept": false, "left": "pull_request:43464", "reason": "Different integration tests for different models; no shared bug or code path.", "right": "pull_request:44527"}, {"accept": false, "left": "pull_request:44456", "reason": "DepthPro test dtype fix and MusicGen stereo test dtype fix are separate model-specific failures.", "right": "pull_request:44527"}, {"accept": false, "left": "pull_request:44515", "reason": "GPT-Neo generation test tweak versus MusicGen stereo integration fix; unrelated concrete issues.", "right": "pull_request:44527"}, {"accept": false, "left": "pull_request:44490", "reason": "Both mention model parallelism, but they fix different architectures and code paths; too broad to treat as duplicates.", "right": "pull_request:45214"}, {"accept": false, "left": "pull_request:43324", "reason": "Minimax test expectation updates and EuroBERT parallelism bug fix are unrelated changes.", "right": "pull_request:44490"}], "summary": "This 
cluster is heterogeneous: it mixes unrelated model test expectation updates, typing/CLI cleanup, tokenizer mapping edits, and a few distinct model-parallel bug fixes. I don\u2019t see any pair that looks like the same underlying bug/change, so none of the soft edges should be merged as duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44179|pull_request:44235", "pull_request:44566|pull_request:45190", "pull_request:43324|pull_request:43938", "pull_request:43339|pull_request:45214", "pull_request:43488|pull_request:43563", "pull_request:43324|pull_request:43615", "pull_request:43488|pull_request:43500", "pull_request:43938|pull_request:45214", "pull_request:44519|pull_request:44527", "pull_request:43464|pull_request:44527", "pull_request:44456|pull_request:44527", "pull_request:44515|pull_request:44527", "pull_request:44490|pull_request:45214", "pull_request:43324|pull_request:44490"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 22856, "estimated_input_tokens": 11300, "item_count": 18, "node_count": 18, "serialized_chars": 45199, "soft_pair_count": 15}, "cached_at": "2026-04-14T17:48:25Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "554021489d2dca2b862df7a8dfa762bff54a48d24e770e63558e0e34ff6599de", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43339", "pull_request:43500", "pull_request:43563", "pull_request:43879", "pull_request:43902", "pull_request:43913", "pull_request:43938", "pull_request:44037", "pull_request:44321", "pull_request:44330", "pull_request:44353", "pull_request:44428", "pull_request:44482", "pull_request:44490", 
"pull_request:44602", "pull_request:44657", "pull_request:44733", "pull_request:44828"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "pull_request:44330 is the best global PR to keep as canonical because it addresses the core underlying API cleanup across many models, has the widest technical impact, and its follow-up PRs are clearly incremental continuations of the same change.", "canonical_issue_reason": null, "canonical_pr_reason": "pull_request:44330 is the strongest representative PR: it is a substantive code-path change, broad in scope, and sits at the center of the repeated cache_position-removal series that continues in 44602 and 44828.", "confidence": 0.83, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:43913", "reason": "Same underlying qwen3_vl_moe weight-mapping bug in the same conversion files; 44037 is a refined implementation of the same fix and could plausibly be merged into one PR.", "right": "pull_request:44037"}, {"accept": false, "left": "pull_request:43500", "reason": "Both are unrelated bot/style placeholder edits in ViT tests, not the same concrete bug or change.", "right": "pull_request:43563"}, {"accept": false, "left": "pull_request:43938", "reason": "Different models and different failure modes: exaone_moe test expectations vs eurobert model-parallel masking fix.", "right": "pull_request:44490"}, {"accept": false, "left": "pull_request:43339", "reason": "Different model-specific test adjustments: lw_detr XPU expectations vs VoxtralRealtime test skips.", "right": "pull_request:44321"}, {"accept": false, "left": "pull_request:44428", "reason": "Both add XPU expectations, but for different models and different test data; not the same underlying change.", "right": "pull_request:44482"}, {"accept": false, "left": "pull_request:44482", "reason": "Different targets and fixes: higgs_audio_v2 expectations vs qwen2/jamba flash-attention/kernel bug.", "right": "pull_request:44733"}, {"accept": false, 
"left": "pull_request:43339", "reason": "Different models and unrelated test-only changes; no shared bug or code path.", "right": "pull_request:43938"}, {"accept": false, "left": "pull_request:44428", "reason": "Vibevoice tokenizer fixture update is unrelated to the qwen2/jamba flash-attention/kernel fix.", "right": "pull_request:44733"}, {"accept": true, "left": "pull_request:44330", "reason": "Same cache_position-removal code-path cleanup across models; 44602 is a direct continuation of 44330's change series.", "right": "pull_request:44602"}, {"accept": false, "left": "pull_request:44353", "reason": "Different models and different expectation updates; no shared underlying defect.", "right": "pull_request:44482"}, {"accept": false, "left": "pull_request:44353", "reason": "Different model test expectations for different runtimes; not a duplicate.", "right": "pull_request:44428"}, {"accept": false, "left": "pull_request:44353", "reason": "OLMo hybrid XPU expectations are unrelated to qwen2/jamba flash-attention fixes.", "right": "pull_request:44733"}, {"accept": false, "left": "pull_request:44490", "reason": "Eurobert model-parallelism fix is unrelated to the Electra test configuration mismatch.", "right": "pull_request:44657"}, {"accept": true, "left": "pull_request:43879", "reason": "Same stale-tech-stack documentation update; both change installation/docs wording to the newer Python/PyTorch versions and could be combined.", "right": "pull_request:43902"}, {"accept": true, "left": "pull_request:44330", "reason": "Same cache_position-removal series and same underlying API cleanup across model forwards; 44828 is another continuation of the same change.", "right": "pull_request:44828"}], "summary": "This set is mostly unrelated PRs, with a few true duplicates/continuations: the qwen3_vl_moe conversion-mapping fixes, the old-tech-stack doc updates, and the cache_position-removal series. 
The model-specific expectation/test-only PRs are not duplicates of each other."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43913|pull_request:44037", "pull_request:43500|pull_request:43563", "pull_request:43938|pull_request:44490", "pull_request:43339|pull_request:44321", "pull_request:44428|pull_request:44482", "pull_request:44482|pull_request:44733", "pull_request:43339|pull_request:43938", "pull_request:44428|pull_request:44733", "pull_request:44330|pull_request:44602", "pull_request:44353|pull_request:44482", "pull_request:44353|pull_request:44428", "pull_request:44353|pull_request:44733", "pull_request:44490|pull_request:44657", "pull_request:43879|pull_request:43902", "pull_request:44330|pull_request:44828"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 20668, "estimated_input_tokens": 10206, "item_count": 18, "node_count": 18, "serialized_chars": 40824, "soft_pair_count": 11}, "cached_at": "2026-04-14T17:48:58Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "be95369bc0435e09acc366d90fa717139c33b4274263daeaeee8d7edd10d7a6d", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:42230", "pull_request:43488", "pull_request:43740", "pull_request:43787", "pull_request:43793", "pull_request:43884", "pull_request:43907", "pull_request:43910", "pull_request:43936", "pull_request:43938", "pull_request:44034", "pull_request:44353", "pull_request:44482", "pull_request:44657", "pull_request:44808", "pull_request:45204", "pull_request:45209", "pull_request:45212"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "pull_request:44808 is 
the strongest global representative: it is the most complete substantive implementation among the candidates and directly overlaps the earlier partial PP-OCRv5 mobile_rec PR.", "canonical_issue_reason": null, "canonical_pr_reason": "pull_request:44808 is the best canonical PR because it is the broader, more complete PP-OCRv5 support change and clearly subsumes the earlier mobile_rec-only addition in pull_request:43793 across docs, auto registries, model files, and tests.", "confidence": 0.93, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43907", "reason": "Both tweak test expectations, but for different models and different failure modes; not the same underlying bug or change.", "right": "pull_request:43910"}, {"accept": false, "left": "pull_request:42230", "reason": "Both mention XPU, but one fixes attention mask handling in model code while the other adds XPU test support for a different model.", "right": "pull_request:45212"}, {"accept": false, "left": "pull_request:43740", "reason": "Both remove PT<2.4-era code, but one is a single-model cleanup and the other is a broad multi-model refactor; not mergeable as one concrete change.", "right": "pull_request:43787"}, {"accept": false, "left": "pull_request:42230", "reason": "Different targets: SDPA/XPU attention-mask fix versus xpu-specific test expectations for olmo_hybrid.", "right": "pull_request:44353"}, {"accept": false, "left": "pull_request:42230", "reason": "Different models and different layers of the stack; one is runtime masking logic, the other is test expectation updates.", "right": "pull_request:44482"}, {"accept": false, "left": "pull_request:43936", "reason": "Moonshine device placement fix vs exaone_moe test expectation update; unrelated code paths.", "right": "pull_request:43938"}, {"accept": false, "left": "pull_request:43488", "reason": "Both are workflow/check-related, but one is a setup/versioning bot check and the other injects failures into tests; not the same change.", 
"right": "pull_request:43884"}, {"accept": false, "left": "pull_request:43488", "reason": "Same as above: both touch check workflows indirectly, but they are distinct prank/guard PRs affecting different test files and behaviors.", "right": "pull_request:44034"}, {"accept": false, "left": "pull_request:45209", "reason": "Both aim at device-robust tests, but for different models and different mechanisms; not a single underlying bug.", "right": "pull_request:45212"}, {"accept": false, "left": "pull_request:44657", "reason": "Electra test config rounding fix and Videomt device-mismatch fix are unrelated model-specific issues.", "right": "pull_request:45204"}, {"accept": true, "left": "pull_request:43793", "reason": "44808 extends and overlaps the earlier PP-OCRv5_mobile_rec support PR across the same model family, docs, auto mappings, and tests; it is plausibly a merged superset of the same feature work.", "right": "pull_request:44808"}], "summary": "Mostly a grab-bag of unrelated PRs. The only strong duplicate-like overlap is the PP-OCRv5 mobile_rec model-support pair, where the later combined model-support PR subsumes the earlier mobile_rec-only PR. 
The rest are broad similarity matches but not the same concrete bug/change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43907|pull_request:43910", "pull_request:42230|pull_request:45212", "pull_request:43740|pull_request:43787", "pull_request:42230|pull_request:44353", "pull_request:42230|pull_request:44482", "pull_request:43936|pull_request:43938", "pull_request:43488|pull_request:43884", "pull_request:43488|pull_request:44034", "pull_request:45209|pull_request:45212", "pull_request:44657|pull_request:45204", "pull_request:43793|pull_request:44808"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 22054, "estimated_input_tokens": 10899, "item_count": 18, "node_count": 18, "serialized_chars": 43596, "soft_pair_count": 20}, "cached_at": "2026-04-14T17:50:18Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "83ef503206811425cb98cbb743c15a6afac0c168f574270ffe59d238f026f7e7", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:42230", "pull_request:43341", "pull_request:43553", "pull_request:43554", "pull_request:43555", "pull_request:43758", "pull_request:43795", "pull_request:43907", "pull_request:43910", "pull_request:43936", "pull_request:44412", "pull_request:44647", "pull_request:44808", "pull_request:45204", "pull_request:45209", "pull_request:45212", "pull_request:45415", "pull_request:45425"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "45415 is the broadest typing-related PR and the closest umbrella over the type-checking subgroup, but the overall cluster is too heterogeneous for it to be a true canonical representative.", 
"canonical_issue_reason": null, "canonical_pr_reason": "No single canonical PR stands out because the cluster splits into several unrelated themes (OCR model support, GLM/tests, workflow permissions, typing, and device-specific fixes).", "confidence": 0.89, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43795", "reason": "Related model-support work, but 44808 adds mobile_rec in addition to server_rec, so this is not the same concrete change.", "right": "pull_request:44808"}, {"accept": false, "left": "pull_request:45204", "reason": "Different models and different fixes: videomt device-mismatch code-path vs a Nomic BERT test output adjustment.", "right": "pull_request:45209"}, {"accept": false, "left": "pull_request:42230", "reason": "Both involve device handling, but one changes attention-mask unmasking for XPU/CUDA while the other fixes videomt query token placement; not the same bug.", "right": "pull_request:45204"}, {"accept": false, "left": "pull_request:42230", "reason": "Device-related in a broad sense, but these touch unrelated code paths and models.", "right": "pull_request:45209"}, {"accept": false, "left": "pull_request:45204", "reason": "Unrelated model-specific device fixes; no shared concrete code-path problem.", "right": "pull_request:45212"}, {"accept": false, "left": "pull_request:44412", "reason": "Both are typing-related, but 44412 is a targeted quantizers type-checking PR while 45415 is a much broader repo-wide typing sweep.", "right": "pull_request:45415"}, {"accept": false, "left": "pull_request:43341", "reason": "Same test file family, but one skips unsupported offload tests and the other updates expected outputs; different issues.", "right": "pull_request:43910"}, {"accept": false, "left": "pull_request:43341", "reason": "Different models and failures; only superficial similarity in being test-related.", "right": "pull_request:43936"}, {"accept": false, "left": "pull_request:43910", "reason": "Different model test fixes with 
no shared concrete bug or change.", "right": "pull_request:43936"}, {"accept": false, "left": "pull_request:43341", "reason": "Same GLM image test file, but one adds xfail skips for unsupported offload tests and the other updates generation expectations; not the same underlying change.", "right": "pull_request:43907"}, {"accept": false, "left": "pull_request:43907", "reason": "Different model/test paths; no evidence they address the same bug.", "right": "pull_request:43936"}, {"accept": false, "left": "pull_request:44647", "reason": "Both mention device support, but they are for different subsystems and different models; not mergeable as one fix.", "right": "pull_request:45212"}, {"accept": false, "left": "pull_request:43341", "reason": "Different models and different failure modes; only broad test/device churn in common.", "right": "pull_request:45204"}, {"accept": false, "left": "pull_request:43910", "reason": "Unrelated model test update vs videomt runtime device fix.", "right": "pull_request:45204"}, {"accept": false, "left": "pull_request:43907", "reason": "Different models and different code paths; no duplicate signal.", "right": "pull_request:45204"}, {"accept": false, "left": "pull_request:45415", "reason": "Both are typing-related, but 45425 is a focused modeling_utils typing follow-up while 45415 is a broad open sweep across many files.", "right": "pull_request:45425"}, {"accept": false, "left": "pull_request:43553", "reason": "These are sequential edits to the same workflow file, but they change different bot settings and are not the same underlying change.", "right": "pull_request:43554"}, {"accept": false, "left": "pull_request:43553", "reason": "Same workflow file, but part 1 adds the bot workflow and part 3 changes permissions; related, not duplicate.", "right": "pull_request:43555"}, {"accept": false, "left": "pull_request:43758", "reason": "Both stabilize tests, but for different models and different assertions; not the same bug.", "right": 
"pull_request:43910"}, {"accept": false, "left": "pull_request:43554", "reason": "Same workflow file, but one changes style command type and the other adds contents permission; distinct follow-up edits.", "right": "pull_request:43555"}], "summary": "This cluster is mostly a mix of unrelated PRs: model additions, test updates, workflow permission tweaks, and typing changes. I found no strong duplicate pairs; most overlaps are only at the subsystem or file level, not the same concrete change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43795|pull_request:44808", "pull_request:45204|pull_request:45209", "pull_request:42230|pull_request:45204", "pull_request:42230|pull_request:45209", "pull_request:45204|pull_request:45212", "pull_request:44412|pull_request:45415", "pull_request:43341|pull_request:43910", "pull_request:43341|pull_request:43936", "pull_request:43910|pull_request:43936", "pull_request:43341|pull_request:43907", "pull_request:43907|pull_request:43936", "pull_request:44647|pull_request:45212", "pull_request:43341|pull_request:45204", "pull_request:43910|pull_request:45204", "pull_request:43907|pull_request:45204", "pull_request:45415|pull_request:45425", "pull_request:43553|pull_request:43554", "pull_request:43553|pull_request:43555", "pull_request:43758|pull_request:43910", "pull_request:43554|pull_request:43555"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 23752, "estimated_input_tokens": 11748, "item_count": 18, "node_count": 18, "serialized_chars": 46992, "soft_pair_count": 21}, "cached_at": "2026-04-14T17:51:12Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": 
"8e011c2f9251dc990305007bcff2bb3e88f1025a5a41a2a7f95e2f6d0c75f090", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43098", "pull_request:43247", "pull_request:43274", "pull_request:43345", "pull_request:43486", "pull_request:43500", "pull_request:43532", "pull_request:43554", "pull_request:43558", "pull_request:43563", "pull_request:43588", "pull_request:43767", "pull_request:43884", "pull_request:44034", "pull_request:44125", "pull_request:44544", "pull_request:44808", "pull_request:45078"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 44808 is the broadest substantive feature addition in the set, but it still does not act as a true global representative for this mixed cluster.", "canonical_issue_reason": null, "canonical_pr_reason": "No canonical PR: the items cover different models, workflow hacks, and separate code-path fixes, so there is no single duplicate representative.", "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43884", "reason": "Both are synthetic test/CI manipulation PRs, but they edit different tests with different injected failures; not the same underlying change.", "right": "pull_request:44034"}, {"accept": false, "left": "pull_request:43486", "reason": "Unrelated fixes: video batching for 5D arrays versus ANSI styling in loading reports.", "right": "pull_request:44544"}, {"accept": false, "left": "pull_request:43098", "reason": "Different model families and code paths: PP-DocLayoutV3 support versus PP-OCRv5 server/mobile rec support.", "right": "pull_request:44808"}, {"accept": false, "left": "pull_request:43247", "reason": "PP-OCRv5_mobile_det support is a different model/addition from PP-OCRv5 server/mobile rec support.", "right": "pull_request:44808"}, {"accept": false, "left": "pull_request:43274", "reason": "PP-OCRv5_server_det support is distinct from the server/mobile recognition models in 44808.", "right": "pull_request:44808"}, {"accept": 
false, "left": "pull_request:43345", "reason": "PP-LCNet model support is unrelated to OCR recognition model support.", "right": "pull_request:44808"}, {"accept": false, "left": "pull_request:43767", "reason": "PP-Chart2Table support is a different model and change surface from PP-OCRv5 recognition models.", "right": "pull_request:44808"}, {"accept": false, "left": "pull_request:43500", "reason": "These are different dummy test edits used for bot checks; they are not the same concrete bug or change.", "right": "pull_request:43884"}, {"accept": false, "left": "pull_request:43500", "reason": "Different CI/test-injection edits in the same file, but not the same underlying purpose or patch.", "right": "pull_request:44034"}, {"accept": false, "left": "pull_request:43588", "reason": "Qwen3 Omni feature-type return annotations and video processor auto-loading are unrelated fixes.", "right": "pull_request:44125"}, {"accept": false, "left": "pull_request:44125", "reason": "Video processor class lookup/error handling is unrelated to tokenizer conversion logic.", "right": "pull_request:45078"}, {"accept": false, "left": "pull_request:44125", "reason": "Video processor auto-loading and loading-report ANSI styling affect different subsystems and bugs.", "right": "pull_request:44544"}, {"accept": false, "left": "pull_request:43563", "reason": "Both are test-file bot-check edits, but they are different injected changes and not the same issue.", "right": "pull_request:43884"}, {"accept": false, "left": "pull_request:43563", "reason": "Same test file, but different synthetic edits; no single underlying code-path problem.", "right": "pull_request:44034"}, {"accept": false, "left": "pull_request:43554", "reason": "Workflow style-bot config change versus unrelated test-file insertion; not mergeable as one fix.", "right": "pull_request:43558"}, {"accept": false, "left": "pull_request:43558", "reason": "These are separate synthetic test edits with different contents, so they are not 
duplicates.", "right": "pull_request:43563"}, {"accept": false, "left": "pull_request:43098", "reason": "Different PP model support PRs for different architectures; too broad a theme to be duplicates.", "right": "pull_request:43247"}, {"accept": false, "left": "pull_request:43500", "reason": "One is a dummy test edit, the other is workflow permission plumbing; they do not fix the same concrete problem.", "right": "pull_request:43532"}], "summary": "This cluster is a mix of unrelated model-support PRs, CI/workflow experiments, and independent bug fixes; none form a true duplicate pair."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43884|pull_request:44034", "pull_request:43486|pull_request:44544", "pull_request:43098|pull_request:44808", "pull_request:43247|pull_request:44808", "pull_request:43500|pull_request:43884", "pull_request:43500|pull_request:44034", "pull_request:43588|pull_request:44125", "pull_request:44125|pull_request:45078", "pull_request:44125|pull_request:44544", "pull_request:43563|pull_request:43884", "pull_request:43563|pull_request:44034", "pull_request:43554|pull_request:43558", "pull_request:43558|pull_request:43563", "pull_request:43274|pull_request:44808", "pull_request:43345|pull_request:44808", "pull_request:43767|pull_request:44808", "pull_request:43500|pull_request:43532", "pull_request:43098|pull_request:43247", "pull_request:43098|pull_request:43274", "pull_request:43098|pull_request:43345", "pull_request:43098|pull_request:43767"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 16080, "estimated_input_tokens": 7912, "item_count": 17, "node_count": 17, "serialized_chars": 31647, "soft_pair_count": 18}, "cached_at": "2026-04-14T17:52:06Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": 
"0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "46874b55a794680676fe3da6636e5cf41bd1805d021a52a4700b600d3b88f419", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43098", "pull_request:43247", "pull_request:43274", "pull_request:43345", "pull_request:43445", "pull_request:43532", "pull_request:43767", "pull_request:43793", "pull_request:43795", "pull_request:43884", "pull_request:44034", "pull_request:44053", "pull_request:44413", "pull_request:44827", "pull_request:45286", "pull_request:45287", "pull_request:45288"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 43247 is the most suitable global representative: it has the strongest discussion/review activity, touches the standard model-support surface area, and is broadly representative of the repeated PP model-support entries. It is still not a duplicate of the others.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 43247 is the best single representative of the cluster because it is a substantial, well-discussed model-support addition and sits in the middle of the repeated PP-* support theme. That said, the cluster is not a true duplicate set, so this is only a representative choice, not a duplicate canonical.", "confidence": 0.95, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43098", "reason": "Both are model-support PRs, but for different models (PP-DocLayoutV3 vs PP-OCRv5_mobile_rec). Same broad docs/auto-registry files are not enough to make them duplicates.", "right": "pull_request:43793"}, {"accept": false, "left": "pull_request:43098", "reason": "Different model support targets (PP-DocLayoutV3 vs PP-OCRv5_server_rec). 
Overlap is only in shared registration/docs plumbing.", "right": "pull_request:43795"}, {"accept": false, "left": "pull_request:43247", "reason": "These add support for different PP-OCRv5 variants (mobile_det vs server_det). They are related, but not the same bug/change.", "right": "pull_request:43274"}, {"accept": false, "left": "pull_request:43247", "reason": "PP-OCRv5_mobile_det and PP-LCNet are unrelated model additions; shared auto/docs files are just common support scaffolding.", "right": "pull_request:43345"}, {"accept": false, "left": "pull_request:43247", "reason": "PP-OCRv5_mobile_det and PP-Chart2Table are distinct model-support changes with different code paths and artifacts.", "right": "pull_request:43767"}, {"accept": false, "left": "pull_request:43247", "reason": "Different model variants and different support files; this is not one concrete underlying change.", "right": "pull_request:43793"}, {"accept": false, "left": "pull_request:43247", "reason": "Mobile_det support and server_rec support are separate model additions, even though they share the PP-OCRv5 family.", "right": "pull_request:43795"}, {"accept": false, "left": "pull_request:43274", "reason": "PP-OCRv5_server_det and PP-LCNet are unrelated model-support PRs; same registry/docs files do not imply duplication.", "right": "pull_request:43345"}, {"accept": false, "left": "pull_request:43274", "reason": "Different model families and different generated support files; not the same underlying change.", "right": "pull_request:43767"}, {"accept": false, "left": "pull_request:43274", "reason": "Server_det support and mobile_rec support are distinct model additions with different implementation details.", "right": "pull_request:43793"}, {"accept": false, "left": "pull_request:43274", "reason": "Both are PP-OCRv5 family support, but one is server_det and the other is server_rec; they are separate additions, not one fix.", "right": "pull_request:43795"}, {"accept": false, "left": 
"pull_request:43532", "reason": "43532 is a workflow/diff plumbing PR, while 43884 is a deliberate test-breaking change in vit. They do not fix the same code-path problem.", "right": "pull_request:43884"}, {"accept": false, "left": "pull_request:43532", "reason": "43532 targets workflow/tooling behavior; 44034 is another intentional failing-test PR. Shared test filenames are incidental.", "right": "pull_request:44034"}, {"accept": false, "left": "pull_request:43445", "reason": "43445 fixes MoE router/conversion logic for specific models, while 44827 is a Mistral4 test-oriented change. Same file overlap is too broad to be a duplicate.", "right": "pull_request:44827"}, {"accept": false, "left": "pull_request:45286", "reason": "These auto-fix different model test files (nomic_bert vs videomt). They are unrelated test adjustments, not one underlying fix.", "right": "pull_request:45287"}, {"accept": false, "left": "pull_request:45286", "reason": "Different model test suites and different failures; not the same concrete bug.", "right": "pull_request:45288"}, {"accept": false, "left": "pull_request:45287", "reason": "These are separate auto-fixes for different model test suites, so they should not be merged as duplicates.", "right": "pull_request:45288"}, {"accept": false, "left": "pull_request:44053", "reason": "Both touch peft.py, but they appear to fix different small mapping/typo issues in different parts of the conversion helper. Same file alone is not enough to accept.", "right": "pull_request:44413"}], "summary": "This set is mostly unrelated PRs that only overlap at a broad subsystem level (docs, auto registries, shared test scaffolding, or the same integration file). 
I do not see any true duplicate pairs; the closest matches are still separate concrete model additions or separate small bug fixes."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43098|pull_request:43793", "pull_request:43098|pull_request:43795", "pull_request:43247|pull_request:43274", "pull_request:43247|pull_request:43345", "pull_request:43247|pull_request:43767", "pull_request:43247|pull_request:43793", "pull_request:43247|pull_request:43795", "pull_request:43274|pull_request:43345", "pull_request:43274|pull_request:43767", "pull_request:43274|pull_request:43793", "pull_request:43274|pull_request:43795", "pull_request:43532|pull_request:43884", "pull_request:43532|pull_request:44034", "pull_request:43445|pull_request:44827", "pull_request:45286|pull_request:45287", "pull_request:45286|pull_request:45288", "pull_request:45287|pull_request:45288", "pull_request:44053|pull_request:44413"], "split": true, "trimmed": true} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 15894, "estimated_input_tokens": 7819, "item_count": 18, "node_count": 18, "serialized_chars": 31274, "soft_pair_count": 23}, "cached_at": "2026-04-14T17:55:41Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "2150ad45939ecf844f3a297691a91a6f870e3055bf24e12b2ee7447d2782c8fb", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:41145", "pull_request:42227", "pull_request:43098", "pull_request:43247", "pull_request:43274", "pull_request:43291", "pull_request:43345", "pull_request:43385", "pull_request:43707", "pull_request:44395", "pull_request:44413", "pull_request:44542", "pull_request:44634", "pull_request:45286", 
"pull_request:45287", "pull_request:45288", "pull_request:45403", "pull_request:45410"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 44395 is the best standalone representative of the set: it is merged, linked to an issue, and includes a concrete fix with supporting docs/tests. The others are separate model additions or narrow test/backend/kernel tweaks.", "canonical_issue_reason": null, "canonical_pr_reason": "No single PR is a true duplicate anchor here; the set is heterogeneous. If one PR must be chosen as the most representative standalone change, 44395 is the strongest candidate because it is merged, issue-linked, and a substantive code/docs/tests fix.", "confidence": 0.95, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44395", "reason": "Different fixes in different areas: kernels security/loading behavior vs a Higgs Audio backend dependency change.", "right": "pull_request:44542"}, {"accept": false, "left": "pull_request:44413", "reason": "Unrelated code paths: PEFT conversion mapping logic vs a backend dependency fix.", "right": "pull_request:44542"}, {"accept": false, "left": "pull_request:44542", "reason": "Both touch model internals, but they fix different problems in different model families and code paths.", "right": "pull_request:44634"}, {"accept": false, "left": "pull_request:42227", "reason": "Whisper pipeline return-language behavior is not the same as whisper tokenizer test adjustments.", "right": "pull_request:43291"}, {"accept": false, "left": "pull_request:45286", "reason": "Different models and different failing tests; no shared underlying bug.", "right": "pull_request:45410"}, {"accept": false, "left": "pull_request:45287", "reason": "Different models and different failing tests; no shared underlying bug.", "right": "pull_request:45410"}, {"accept": false, "left": "pull_request:45288", "reason": "Different models and different failing tests; no shared underlying bug.", "right": 
"pull_request:45410"}, {"accept": false, "left": "pull_request:45403", "reason": "Different test files for different model families; these are separate fixes.", "right": "pull_request:45410"}, {"accept": false, "left": "pull_request:45286", "reason": "Distinct test-only fixes for different models, not one shared defect.", "right": "pull_request:45403"}, {"accept": false, "left": "pull_request:45287", "reason": "Distinct test-only fixes for different models, not one shared defect.", "right": "pull_request:45403"}, {"accept": false, "left": "pull_request:45288", "reason": "Distinct test-only fixes for different models, not one shared defect.", "right": "pull_request:45403"}, {"accept": false, "left": "pull_request:43385", "reason": "Both add model support, but for different models (UVDoc vs SLANeXt) and different implementation details.", "right": "pull_request:43707"}, {"accept": false, "left": "pull_request:43098", "reason": "Separate model-support additions for different models; same broad pattern but not the same change.", "right": "pull_request:43385"}, {"accept": false, "left": "pull_request:43098", "reason": "Separate model-support additions for different models; same broad pattern but not the same change.", "right": "pull_request:43707"}, {"accept": false, "left": "pull_request:43247", "reason": "Different model support PRs for different OCR models; not a single fix.", "right": "pull_request:43385"}, {"accept": false, "left": "pull_request:43247", "reason": "Different model support PRs for different OCR models; not a single fix.", "right": "pull_request:43707"}, {"accept": false, "left": "pull_request:43274", "reason": "Different model support PRs for different models; only the template is shared.", "right": "pull_request:43385"}, {"accept": false, "left": "pull_request:43274", "reason": "Different model support PRs for different models; only the template is shared.", "right": "pull_request:43707"}, {"accept": false, "left": "pull_request:43345", "reason": 
"Both are model-support PRs, but for different models and separate code additions.", "right": "pull_request:43385"}, {"accept": false, "left": "pull_request:43345", "reason": "Both are model-support PRs, but for different models and separate code additions.", "right": "pull_request:43707"}, {"accept": false, "left": "pull_request:41145", "reason": "Qwen3 rope kernel support is unrelated to UVDoc model support.", "right": "pull_request:43385"}, {"accept": false, "left": "pull_request:41145", "reason": "Qwen3 rope kernel support is unrelated to SLANeXt model support.", "right": "pull_request:43707"}, {"accept": false, "left": "pull_request:41145", "reason": "Different model support work in unrelated model families; not one underlying bug/change.", "right": "pull_request:43098"}], "summary": "The soft-similarity hits are mostly template-driven matches across unrelated PRs: different model-support additions, distinct whisper fixes, separate test-only autofixes, and unrelated kernel/integration changes. 
None looks like the same underlying change, so the soft edges should be rejected."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44395|pull_request:44542", "pull_request:44413|pull_request:44542", "pull_request:44542|pull_request:44634", "pull_request:42227|pull_request:43291", "pull_request:45286|pull_request:45410", "pull_request:45287|pull_request:45410", "pull_request:45288|pull_request:45410", "pull_request:45403|pull_request:45410", "pull_request:45286|pull_request:45403", "pull_request:45287|pull_request:45403", "pull_request:45288|pull_request:45403", "pull_request:43385|pull_request:43707", "pull_request:43098|pull_request:43385", "pull_request:43098|pull_request:43707", "pull_request:43247|pull_request:43385", "pull_request:43247|pull_request:43707", "pull_request:43274|pull_request:43385", "pull_request:43274|pull_request:43707", "pull_request:43345|pull_request:43385", "pull_request:43345|pull_request:43707", "pull_request:41145|pull_request:43385", "pull_request:41145|pull_request:43707", "pull_request:41145|pull_request:43098"], "split": true, "trimmed": true} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 12964, "estimated_input_tokens": 6354, "item_count": 11, "node_count": 11, "serialized_chars": 25414, "soft_pair_count": 6}, "cached_at": "2026-04-14T17:56:20Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "8bac9d03a66a3584e7253c68600c26d21ef5ceeab81877ddb70d71888ed8d467", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43291", "pull_request:43385", "pull_request:43647", "pull_request:43648", "pull_request:43665", "pull_request:43707", "pull_request:44053", 
"pull_request:44571", "pull_request:44815", "pull_request:44994", "pull_request:45410"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "43385 is the best global representative because it is a merged, full-featured PR with docs, config, model, image-processing, and test updates; the other PRs are either smaller fixes or unrelated changes.", "canonical_issue_reason": null, "canonical_pr_reason": "No issue artifact is present; as a representative PR, 43385 is the strongest canonical candidate because it is a merged, substantial model-support addition with broad implementation and test coverage.", "confidence": 0.93, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44053", "reason": "Both are PEFT/pegasus-related fixes at a very high level, but they address different code paths: one corrects a PEFT mapping attribute name, the other changes Pegasus conversion/dependency handling. Not the same concrete change.", "right": "pull_request:44571"}, {"accept": false, "left": "pull_request:43647", "reason": "Both touch self-comment CI workflow files, but the diffs are different workflow edits with different affected files and logic. They look related, not like the same underlying bug/change that should be merged into one PR.", "right": "pull_request:43648"}, {"accept": false, "left": "pull_request:43385", "reason": "These are separate model-introduction PRs for different models (UVDoc vs Sarvam) with different files and functionality. Same broad category, not the same change.", "right": "pull_request:44994"}, {"accept": false, "left": "pull_request:43707", "reason": "Different model additions for different architectures and code paths. They are not duplicates of the same underlying work.", "right": "pull_request:44994"}, {"accept": false, "left": "pull_request:43291", "reason": "Whisper tokenizer test/fix work is unrelated to the AltCLIP token_type_ids/device fix. 
No shared concrete bug or mergeable change.", "right": "pull_request:45410"}, {"accept": false, "left": "pull_request:43665", "reason": "One is an intentional test-failing stub PR, the other is an fp8/dequant fix spanning model loading and quantization code. They are unrelated.", "right": "pull_request:44815"}], "summary": "This cluster is mostly a grab-bag of unrelated pull requests: model-addition PRs, CI workflow tweaks, small typo/fix PRs, and an fp8 dequant change. No shared underlying bug or change theme spans the set."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44053|pull_request:44571", "pull_request:43647|pull_request:43648", "pull_request:43385|pull_request:44994", "pull_request:43707|pull_request:44994", "pull_request:43291|pull_request:45410", "pull_request:43665|pull_request:44815"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2356, "estimated_input_tokens": 1050, "item_count": 2, "node_count": 2, "serialized_chars": 4199, "soft_pair_count": 1}, "cached_at": "2026-04-14T17:56:41Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "e78442b1151aa5637a378b6604ee8f885dfb43d9ca321c0ea7430e53e4c699c5", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:39144", "pull_request:39177"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 39177 is the stronger representative only in the sense that it is merged and has a complete, concrete trainer fix; 39144 is a separate GPTQ/XPU bug fix.", "canonical_issue_reason": null, "canonical_pr_reason": "No canonical PR: the changes address different bugs in different modules and are not 
the same underlying fix.", "confidence": 0.98, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:39144", "reason": "Different underlying bugs and code paths. 39144 fixes GPTQ device mapping on XPU; 39177 fixes Trainer/FSDP model/device placement and optimizer creation. They are not plausibly one merged PR.", "right": "pull_request:39177"}], "summary": "These two PRs are unrelated fixes in different code paths: GPTQ/XPU device-map handling vs Trainer/FSDP optimizer/model placement. They should not be clustered as duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:39144|pull_request:39177"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 5786, "estimated_input_tokens": 2765, "item_count": 4, "node_count": 4, "serialized_chars": 11057, "soft_pair_count": 4}, "cached_at": "2026-04-14T17:57:22Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "e1f06652b03eb22e646fa666dca9fd5a1d412aeab0e03a8a11599a5729018f18", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:39722", "pull_request:43174", "pull_request:43458", "pull_request:43538"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #43538 is the strongest representative of the cluster: it is merged, broadest in scope, and appears to consolidate the Music Flamingo feature end-to-end rather than only one integration slice.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #43538 is the best canonical PR because it is the merged, most complete Music Flamingo addition: it includes the model package, auto registrations, docs, tests, and the AudioFlamingo3 
integration pieces, so it most likely subsumes the narrower related PRs.", "confidence": 0.84, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:39722", "reason": "Intern-S1 docs/model scaffolding is unrelated to Music Flamingo / AudioFlamingo3 support; no shared underlying change.", "right": "pull_request:43458"}, {"accept": false, "left": "pull_request:43174", "reason": "Both mention Music Flamingo, but one is a general model-add PR and the other is specifically AudioFlamingo3 support; they are related feature work, not the same concrete change.", "right": "pull_request:43458"}, {"accept": true, "left": "pull_request:43174", "reason": "Both PRs are titled as Music Flamingo additions and appear to target the same underlying feature; #43538 looks like the later consolidated version.", "right": "pull_request:43538"}, {"accept": true, "left": "pull_request:43458", "reason": "#43538 clearly subsumes the AudioFlamingo3 Music Flamingo support changes from #43458 with the full model/docs/registry integration, so they represent the same underlying feature work.", "right": "pull_request:43538"}], "summary": "This cluster is mostly a Music Flamingo feature lineage: two PRs appear to be duplicate/superseding implementations of the same model addition, while one PR is a narrower AudioFlamingo3 support change and one PR is unrelated Intern-S1 work."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:39722|pull_request:43458", "pull_request:43174|pull_request:43538", "pull_request:43174|pull_request:43458", "pull_request:43458|pull_request:43538"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 3224, "estimated_input_tokens": 1484, "item_count": 2, "node_count": 2, "serialized_chars": 5933, "soft_pair_count": 1}, "cached_at": "2026-04-14T17:57:40Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": 
"1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "c7d7a0f7bdebcaab15649d2847ea4287eb9ad7a6cb24d38725db97446855206f", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:40962", "pull_request:43166"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 43166 is the strongest standalone PR in the cluster: it is merged and has a complete implementation/docs/tests package for one model.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 43166 is the better representative artifact because it is merged, self-contained, and clearly scoped to the Youtu-LLM model addition.", "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:40962", "reason": "Both add different models (Isaac vs Youtu-LLM); shared files are just common Transformers registration/docs scaffolding, not the same code-path fix or mergeable change.", "right": "pull_request:43166"}], "summary": "Two unrelated model-addition PRs with overlapping Transformers boilerplate; they do not appear to be the same underlying change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:40962|pull_request:43166"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 3994, "estimated_input_tokens": 1869, "item_count": 3, "node_count": 3, "serialized_chars": 7473, "soft_pair_count": 2}, "cached_at": "2026-04-14T17:57:57Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": 
"6149c59e7f02e0b7fcacd266bf0874534cd5eb8eae434b79fdd32f16d7a486c8", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:41251", "pull_request:43912", "pull_request:44797"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "No single PR is a good canonical representative because the three PRs are unrelated feature/fix areas. If forced to pick one, PR 43912 is the most self-contained merged code change, but it is not actually related to the others.", "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:41251", "reason": "Different models and different change types: DeepSeek 3.2 support vs GlmMoeDsa implementation. No shared bug or concrete code path.", "right": "pull_request:43912"}, {"accept": false, "left": "pull_request:41251", "reason": "DeepSeek model support is unrelated to FA4 kernel fallback. They modify different subsystems and are not mergeable as one PR.", "right": "pull_request:44797"}], "summary": "The cluster does not contain a true duplicate set: one PR adds DeepSeek-V3.2 support, one implements GlmMoeDsa, and one adds FA4 kernel fallback. 
They touch different code paths and cannot plausibly be merged as one change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:41251|pull_request:43912", "pull_request:41251|pull_request:44797"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2534, "estimated_input_tokens": 1139, "item_count": 2, "node_count": 2, "serialized_chars": 4553, "soft_pair_count": 1}, "cached_at": "2026-04-14T17:58:08Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "4816d4318ad66e62b7c0241793a7970ea2f16b887296d3fdc0a2c0281c9fadf4", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:41307", "pull_request:42135"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #41307 is the best representative because it makes the actual project-wide version floor change; #42135 is documentation-only and too narrow to stand in for the underlying change.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #41307 is the substantive change: it updates the minimum Torch version in setup/dependency tables and related code paths. PR #42135 only edits installation docs and does not fix the same concrete problem.", "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:41307", "reason": "Same general topic, but not the same underlying change. 
#41307 updates dependency/version requirements in code and packaging; #42135 only updates documentation text.", "right": "pull_request:42135"}], "summary": "These are related only at a high level: both mention raising the PyTorch version, but one is the real dependency bump across packaging/code and the other is a docs-only wording update. They should not be clustered as duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:41307|pull_request:42135"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 3422, "estimated_input_tokens": 1583, "item_count": 2, "node_count": 2, "serialized_chars": 6329, "soft_pair_count": 1}, "cached_at": "2026-04-14T17:58:35Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "d1f30833007cceff42db9277ae6866e5cfaf47f7882fd34219829ef38e1597fc", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:41545", "pull_request:45176"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "No global best PR for this cluster: neither PR is a duplicate of the other, so there is no single representative change to prefer.", "canonical_issue_reason": null, "canonical_pr_reason": "No canonical PR: the PRs address different model families and different code paths, so they should not be collapsed into one representative change.", "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:41545", "reason": "Reject: PR 41545 adds Parakeet TDT support, while PR 45176 adds EfficientVITSAM. 
Shared auto files are incidental registry updates, not the same underlying bug or feature.", "right": "pull_request:45176"}], "summary": "These are unrelated model-integration PRs: one adds Parakeet TDT support, the other adds EfficientVITSAM. The only overlap is generic auto-registry plumbing, which is not enough to treat them as the same change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:41545|pull_request:45176"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 8946, "estimated_input_tokens": 4345, "item_count": 7, "node_count": 7, "serialized_chars": 17377, "soft_pair_count": 8}, "cached_at": "2026-04-14T17:59:54Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "55b6d8b55953829c419be87de1d261fe2483ece9f2275ea81f4bdbc335caadc7", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:41797", "pull_request:43695", "pull_request:43780", "pull_request:43853", "pull_request:43882", "pull_request:44607", "pull_request:45186"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.95, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:41797", "reason": "Both add different new models (DeepSeek OCR vs Isaac); shared docs/auto-registration scaffolding is incidental, not the same change.", "right": "pull_request:45186"}, {"accept": false, "left": "pull_request:43695", "reason": "Same broad gpt-oss/tensor-parallel theme, but different fixes: one changes TP mapping for `sinks`, the other fixes an `mxfp4` routing call. 
Different concrete code paths, so not a single mergeable PR.", "right": "pull_request:43853"}, {"accept": false, "left": "pull_request:43695", "reason": "Unrelated subsystems: gpt-oss TP config versus Trainer/DeepSpeed model preparation logic.", "right": "pull_request:43780"}, {"accept": false, "left": "pull_request:43695", "reason": "Different models and bugs: gpt-oss TP crash versus llama4 image feature output type/key handling.", "right": "pull_request:43882"}, {"accept": false, "left": "pull_request:43780", "reason": "Trainer DeepSpeed preparation fix is unrelated to the gpt-oss TP/mxfp4 routing crash fix.", "right": "pull_request:43853"}, {"accept": false, "left": "pull_request:43853", "reason": "Different models and failures: gpt-oss TP routing versus llama4 image feature extraction behavior.", "right": "pull_request:43882"}, {"accept": false, "left": "pull_request:43882", "reason": "Separate model work: llama4 output handling fix versus adding the Isaac model.", "right": "pull_request:45186"}, {"accept": false, "left": "pull_request:44607", "reason": "Idefics3/SmolVLM cache-generation fix is unrelated to adding the Isaac model.", "right": "pull_request:45186"}], "summary": "No actual duplicate cluster here: most items are unrelated model additions or single-file bug fixes. 
The only close-looking pair is the two gpt-oss TP crash PRs, but they patch different code paths (TP sharding config vs mxfp4 routing call) so they should stay separate."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:41797|pull_request:45186", "pull_request:43695|pull_request:43853", "pull_request:43695|pull_request:43780", "pull_request:43695|pull_request:43882", "pull_request:43780|pull_request:43853", "pull_request:43853|pull_request:43882", "pull_request:43882|pull_request:45186", "pull_request:44607|pull_request:45186"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 19798, "estimated_input_tokens": 9771, "item_count": 15, "node_count": 15, "serialized_chars": 39084, "soft_pair_count": 24}, "cached_at": "2026-04-14T18:01:05Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "f00b83c7a9d262e71c81ef65be61588c8f6762ba3715aad762ea4d333e40f783", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:42166", "pull_request:42504", "pull_request:43177", "pull_request:43331", "pull_request:44711", "pull_request:44904", "pull_request:45019", "pull_request:45079", "pull_request:45211", "pull_request:45334", "pull_request:45345", "pull_request:45351", "pull_request:45352", "pull_request:45371", "pull_request:45426"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "45079 is the strongest overall representative in this set: merged, focused on one concrete bug, and backed by targeted test coverage.", "canonical_issue_reason": null, "canonical_pr_reason": "No single canonical PR cleanly represents the whole cluster because it mixes unrelated 
changes. If one must be chosen, 45079 is the best representative standalone bugfix: it is merged, tightly scoped, and directly addresses a concrete resize/post-init regression with tests.", "confidence": 0.63, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:45211", "reason": "Same underlying fix: both correct the return type annotation for Qwen3MoeSparseMoeBlock.forward across the model/modular variants.", "right": "pull_request:45352"}, {"accept": false, "left": "pull_request:44711", "reason": "Same issue target, but the visible changes are different code paths: one touches class-module lookup in modeling_utils, the other fixes resized LM head/post_init behavior.", "right": "pull_request:45079"}, {"accept": false, "left": "pull_request:44904", "reason": "Related Granite multiplier-field problem, but not the same change: 44904 coerces ints in GraniteSpeech nested text_config, while 45019 widens Granite config type hints.", "right": "pull_request:45019"}, {"accept": true, "left": "pull_request:45351", "reason": "Both patch the same function and same bug: get_device_properties should not call get_device_capability unless CUDA is available.", "right": "pull_request:45371"}, {"accept": false, "left": "pull_request:45211", "reason": "Unrelated changes: Qwen3MoE annotation fix versus adding the ax_k1 model stack.", "right": "pull_request:45334"}, {"accept": false, "left": "pull_request:45211", "reason": "Unrelated changes: Qwen3MoE annotation fix versus a separate ax_k1 model addition.", "right": "pull_request:45426"}, {"accept": false, "left": "pull_request:45211", "reason": "Different code paths and goals: Qwen3MoE annotation fix versus Llama tokenizer ByteLevel-BPE handling.", "right": "pull_request:45345"}, {"accept": false, "left": "pull_request:43177", "reason": "Different changes: cache reordering device handling versus adding a new ax_k1 model.", "right": "pull_request:45334"}, {"accept": false, "left": "pull_request:43177", "reason": "Different 
changes: cache reordering device handling versus a separate ax_k1 model addition.", "right": "pull_request:45426"}, {"accept": false, "left": "pull_request:43177", "reason": "Unrelated fixes: cache reordering device handling versus Llama tokenizer ByteLevel-BPE support.", "right": "pull_request:45345"}, {"accept": false, "left": "pull_request:42166", "reason": "Different model families and changes: InternVLFlash addition versus ax_k1 addition.", "right": "pull_request:45334"}, {"accept": false, "left": "pull_request:42166", "reason": "Different model families and changes: InternVLFlash addition versus ax_k1 addition.", "right": "pull_request:45426"}, {"accept": false, "left": "pull_request:42166", "reason": "Unrelated: InternVLFlash model addition versus Qwen3MoE annotation fix.", "right": "pull_request:45211"}, {"accept": false, "left": "pull_request:42166", "reason": "Unrelated: new model addition versus cache reordering fix.", "right": "pull_request:43177"}, {"accept": false, "left": "pull_request:45019", "reason": "Different concerns: Granite multiplier-field typing versus Qwen3MoE return annotation.", "right": "pull_request:45211"}, {"accept": false, "left": "pull_request:42166", "reason": "Different model areas and fixes: InternVLFlash addition versus Granite config typing.", "right": "pull_request:45019"}, {"accept": false, "left": "pull_request:42166", "reason": "Unrelated: InternVLFlash model addition versus resized LM head/post_init bugfix.", "right": "pull_request:45079"}, {"accept": false, "left": "pull_request:45019", "reason": "Different fixes: Granite config typing/validation versus resized LM head overwritten by post_init.", "right": "pull_request:45079"}, {"accept": false, "left": "pull_request:43177", "reason": "Different bugs in different subsystems: cache reordering versus Granite config typing.", "right": "pull_request:45019"}, {"accept": false, "left": "pull_request:45079", "reason": "Unrelated: resized LM head/post_init bugfix versus Qwen3MoE 
annotation fix.", "right": "pull_request:45211"}, {"accept": false, "left": "pull_request:43177", "reason": "Different bugs: cache reordering device handling versus resized LM head/post_init regression.", "right": "pull_request:45079"}, {"accept": false, "left": "pull_request:42166", "reason": "Different model/doc changes: InternVLFlash addition versus YOSO doctest stabilization.", "right": "pull_request:43331"}, {"accept": false, "left": "pull_request:42166", "reason": "Both are model additions, but for different model families and code paths; not a duplicate or same underlying change.", "right": "pull_request:42504"}, {"accept": false, "left": "pull_request:45351", "reason": "Unrelated fixes: CUDA availability guard in testing_utils versus Qwen3MoE return type annotation.", "right": "pull_request:45352"}], "summary": "This cluster is mostly heterogeneous: two real duplicate pairs stand out (Qwen3MoE annotation fix; CUDA availability guard in testing_utils), while the rest are separate model additions or unrelated bug fixes that only share a file or issue target."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:45211|pull_request:45352", "pull_request:44711|pull_request:45079", "pull_request:44904|pull_request:45019", "pull_request:45351|pull_request:45371", "pull_request:45211|pull_request:45334", "pull_request:45211|pull_request:45426", "pull_request:45211|pull_request:45345", "pull_request:43177|pull_request:45334", "pull_request:43177|pull_request:45426", "pull_request:43177|pull_request:45345", "pull_request:42166|pull_request:45334", "pull_request:42166|pull_request:45426", "pull_request:42166|pull_request:45211", "pull_request:42166|pull_request:43177", "pull_request:45019|pull_request:45211", "pull_request:42166|pull_request:45019", "pull_request:42166|pull_request:45079", "pull_request:45019|pull_request:45079", "pull_request:43177|pull_request:45019", 
"pull_request:45079|pull_request:45211", "pull_request:43177|pull_request:45079", "pull_request:42166|pull_request:43331", "pull_request:42166|pull_request:42504", "pull_request:45351|pull_request:45352"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 12480, "estimated_input_tokens": 6112, "item_count": 10, "node_count": 10, "serialized_chars": 24448, "soft_pair_count": 7}, "cached_at": "2026-04-14T18:02:11Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "952935325cb4fbd811572c3319843769bce6fb8691b5702cf2fa818ffd379b26", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:42504", "pull_request:43067", "pull_request:43177", "pull_request:45211", "pull_request:45280", "pull_request:45334", "pull_request:45351", "pull_request:45371", "pull_request:45426", "pull_request:45427"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 45351 is the strongest overall PR candidate because it is a narrowly scoped bug fix, clearly tied to an issue, and represents one of the confirmed duplicate clusters without conflating it with unrelated model additions.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 45351 is the cleanest canonical representative: it directly fixes the CUDA availability check, has an explicit issue target, and has review activity; 45427 is a near-verbatim follow-up to the same change.", "confidence": 0.95, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:45351", "reason": "Same `testing_utils.py` code path and same fix: guard `torch.cuda.get_device_capability()` with `torch.cuda.is_available()`. 
Minor ordering differences only.", "right": "pull_request:45427"}, {"accept": true, "left": "pull_request:45371", "reason": "Same underlying CUDA-availability guard fix in `testing_utils.py`; the patch shape differs slightly but the behavior change is the same.", "right": "pull_request:45427"}, {"accept": true, "left": "pull_request:45334", "reason": "These are the same `ax_k1` feature PR with identical files and essentially identical diff, so they are duplicates.", "right": "pull_request:45426"}, {"accept": false, "left": "pull_request:42504", "reason": "Both add model docs/code, but for different models (`ModernVBERT` vs `Qianfan-OCR`) and different feature implementations.", "right": "pull_request:45280"}, {"accept": false, "left": "pull_request:43177", "reason": "Unrelated changes: one adjusts cache reordering device handling, the other fixes a return type annotation in Qwen3MoE modeling.", "right": "pull_request:45211"}, {"accept": false, "left": "pull_request:43067", "reason": "Different model additions (`NomicBERT` vs `Qianfan-OCR`); same broad subsystem but not the same change.", "right": "pull_request:45280"}, {"accept": false, "left": "pull_request:42504", "reason": "Both are model-addition PRs, but they implement different models and code paths, so they are not duplicates.", "right": "pull_request:43067"}], "summary": "There are two real duplicate PR clusters: the CUDA-capability guard fix in `testing_utils.py`, and the `ax_k1` model-definition addition. 
The other model-introduction PRs are separate model additions and should not be merged together."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:45351|pull_request:45427", "pull_request:45371|pull_request:45427", "pull_request:45334|pull_request:45426", "pull_request:42504|pull_request:45280", "pull_request:43177|pull_request:45211", "pull_request:43067|pull_request:45280", "pull_request:42504|pull_request:43067"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2290, "estimated_input_tokens": 1017, "item_count": 2, "node_count": 2, "serialized_chars": 4065, "soft_pair_count": 1}, "cached_at": "2026-04-14T18:02:30Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "e6f17a066372fb482a604612c9293a5f8daa09640405f830bdd413cf84368b94", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:42797", "pull_request:43666"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 42797 is the more substantial and technically specific change in the cluster, fixing a concrete code-path issue in trainer_pt_utils. PR 43666 is purely docs/typo cleanup and is not comparable as a duplicate target.", "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.98, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:42797", "reason": "Different kinds of changes and different scopes: 42797 fixes LengthGroupedSampler to accept BatchFeature in training utilities, while 43666 only corrects wording in documentation. 
They do not share the same underlying bug or code-path.", "right": "pull_request:43666"}], "summary": "The cluster contains two unrelated pull requests: one is a targeted code fix for LengthGroupedSampler compatibility with BatchFeature in multimodal training, and the other is a documentation typo cleanup in getting-started docs. They do not appear to describe the same bug or change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:42797|pull_request:43666"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 5022, "estimated_input_tokens": 2383, "item_count": 3, "node_count": 3, "serialized_chars": 9529, "soft_pair_count": 2}, "cached_at": "2026-04-14T18:02:56Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "e793cf1e4b2ad2681a70a8540b7cefbe3912a28e9715f900bc3c4a8601d8c49b", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:42975", "pull_request:42976", "pull_request:45180"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "45180 is the best representative of the cluster because it is the only merged PR and its scope is broad and well-defined; the other two are narrower version bumps with different motivations.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 45180 is the strongest canonical PR: it is merged, has an explicit tracking issue, and makes a cohesive security-oriented change by pinning Actions to commit SHAs across many workflows.", "confidence": 0.93, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:42975", "reason": "Both modify workflow action versions, but 42975 is a version upgrade for Node 24 
compatibility while 45180 is a SHA pinning/security hardening change. Different underlying intent and not plausibly one merged PR.", "right": "pull_request:45180"}, {"accept": false, "left": "pull_request:42975", "reason": "These are both workflow dependency bumps, but they target different actions and different goals: general GitHub Actions compatibility vs specific docker/build-push-action updates. Not the same concrete change.", "right": "pull_request:42976"}], "summary": "All three PRs touch GitHub Actions workflow maintenance, but they are different change intents: 42975 upgrades actions for Node 24 compatibility, 42976 bumps docker/build-push-action and related workflow actions, and 45180 pins actions to commit SHAs for security/reproducibility. They are not duplicates of the same underlying change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:42975|pull_request:45180", "pull_request:42975|pull_request:42976"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2634, "estimated_input_tokens": 1189, "item_count": 2, "node_count": 2, "serialized_chars": 4756, "soft_pair_count": 1}, "cached_at": "2026-04-14T18:03:08Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "cd3a97c6eeec8fd654990f58e2cea3541ba3ea665bf0670c4f76c89108a5f562", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43241", "pull_request:44304"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #44304 is the best PR to keep as the cluster representative since it has a specific, self-contained docs change and clearer intent than #43241.", "canonical_issue_reason": 
null, "canonical_pr_reason": "PR #44304 is the cleaner representative of the cluster because its title and diff clearly describe a single docs addition for NeMo Automodel, whereas #43241 has a vague title and is broader/more miscellaneous.", "confidence": 0.94, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43241", "reason": "Both are docs PRs and share only the toctree file, but they add different integration pages (deployment backends vs. NeMo Automodel pretraining/finetuning). They do not fix the same concrete change and would not be merged as one PR.", "right": "pull_request:44304"}], "summary": "These are two unrelated documentation PRs that both touched the docs toctree but add different community integration pages. They are not duplicates of the same underlying change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43241|pull_request:44304"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 3990, "estimated_input_tokens": 1867, "item_count": 3, "node_count": 3, "serialized_chars": 7465, "soft_pair_count": 3}, "cached_at": "2026-04-14T18:03:31Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "41b2218aa00d880a9bb0cfaf599c28ff954f679ca64012d6c3f5492101f02b40", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43336", "pull_request:43658", "pull_request:43841"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 43841 is the strongest representative of the cluster because it directly fixes the zero-sparse-layer behavior with the clearest description and implementation in the exact affected file.", 
"canonical_issue_reason": null, "canonical_pr_reason": "PR 43841 is the best canonical representative of the sparse-layer fix: it targets the same concrete bug as 43658, stays focused on the configuration code path, and includes the clearest explanatory update for disabling sparse layers when the count is zero.", "confidence": 0.93, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:43658", "reason": "Same underlying bug in SwitchTransformers config: both change the zero-case logic for sparse encoder/decoder layer stepping in the same file, and the patches are functionally the same fix with only minor wording/comment differences.", "right": "pull_request:43841"}, {"accept": false, "left": "pull_request:43336", "reason": "43336 is a doc/comment PR spanning unrelated documentation updates and an audio_utils comment change; it does not fix the sparse-layer zero-case bug and would not plausibly be merged with 43841 as one PR.", "right": "pull_request:43841"}, {"accept": false, "left": "pull_request:43336", "reason": "43336 is primarily documentation/comment cleanup, while 43658 is a behavioral bug fix for sparse layer creation; same issue target alone is not enough to treat them as duplicates.", "right": "pull_request:43658"}], "summary": "Two PRs are clear duplicates for the same SwitchTransformers sparse-layer-zero bug; the docstring/comment-only PR is a different change and should stay separate."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43658|pull_request:43841", "pull_request:43336|pull_request:43841", "pull_request:43336|pull_request:43658"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2258, "estimated_input_tokens": 1001, "item_count": 2, "node_count": 2, "serialized_chars": 4004, "soft_pair_count": 1}, "cached_at": "2026-04-14T18:04:09Z", "key": {"analyst_prompt_version": "1.0", 
"cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "a063707211f0688142ded88f12af7700fa285190e29add450a011ab8726f045e", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43354", "pull_request:43467"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "pull_request:43467 is the strongest standalone PR: it addresses the wrong checkpoint/class mismatch in the integration tests by using the correct model class in the affected test paths.", "canonical_issue_reason": null, "canonical_pr_reason": "pull_request:43467 is the better canonical PR because it directly corrects the test code by switching to Glm4vMoeForConditionalGeneration for the GLM-4.5V checkpoint, which is a concrete functional fix rather than disabling the tests.", "confidence": 0.78, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43354", "reason": "Same file and issue target, but these are alternative resolutions, not the same change: 43354 skips the integration tests, while 43467 changes the instantiated model class. They do not look like one mergeable PR fixing the same concrete code-path bug.", "right": "pull_request:43467"}], "summary": "Both PRs are tied to the same tracking issue and the same GLM image integration test file, but they take different approaches: one skips the broken integration test, while the other updates it to use the correct model class. 
They are related, but not the same concrete fix and not something I\u2019d merge as one PR."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43354|pull_request:43467"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 3652, "estimated_input_tokens": 1698, "item_count": 2, "node_count": 2, "serialized_chars": 6789, "soft_pair_count": 1}, "cached_at": "2026-04-14T18:04:55Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "3ab971698705a2733feae25cb380e28f09b900d46c5b7d242c5066253d98cfec", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43357", "pull_request:44171"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "#44171 is the best single PR for this cluster because it is the more complete TDT support change and still open, whereas #43357 is an older, narrower Parakeet TDT PR.", "canonical_issue_reason": null, "canonical_pr_reason": "#44171 is the broader and newer Parakeet TDT implementation, and it covers the same feature area while adding the extra auto/processing integration that makes it the better representative of the change.", "confidence": 0.72, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:43357", "reason": "They target the same underlying feature addition: Parakeet TDT support. 
The second PR appears to extend and complete the same implementation rather than introduce a different change, so they are plausibly mergeable into one PR.", "right": "pull_request:44171"}], "summary": "Both PRs are about adding Parakeet TDT support, with #44171 looking like the broader follow-up that also wires in AutoModelForTDT and related processing/docs updates."}, "error_kind": null, "error_message": null, "evaluator_result": {"accept": true, "feedback": "The summary is grounded in the packet: both PRs are about Parakeet TDT support, and #44171 does look like the broader follow-up with auto/processing/docs integration. The soft-edge verdict is conservative enough here because the changes are in the same feature area and plausibly mergeable into one PR."}, "evaluator_used": true, "retried": false}, "soft_pairs": ["pull_request:43357|pull_request:44171"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 3394, "estimated_input_tokens": 1569, "item_count": 2, "node_count": 2, "serialized_chars": 6273, "soft_pair_count": 1}, "cached_at": "2026-04-14T18:05:05Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "c1a6c134745bb7d198a3426feb77a4c63904539b7a2e3834ec289eac59837ea3", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43396", "pull_request:44167"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #44167 is the best representative because it is merged and appears to be the finalized implementation of the exact same change.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #44167 is the merged version of the same `ty` integration change set, with the same title, diff shape, and file scope as #43396.", 
"confidence": 0.98, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:43396", "reason": "These are effectively the same PR: same title, same touched files, and the diffs shown overlap on the core `ty` integration work. #44167 is the merged continuation/final version, so this is a valid duplicate pair.", "right": "pull_request:44167"}], "summary": "Two pull requests for the same initial `ty` typing-lint integration; the later one is the merged canonical PR and the earlier one is a near-identical duplicate/superseded attempt."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43396|pull_request:44167"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2398, "estimated_input_tokens": 1071, "item_count": 2, "node_count": 2, "serialized_chars": 4283, "soft_pair_count": 1}, "cached_at": "2026-04-14T18:05:15Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "ef83e5caa3d114ac3389fe37aac5b7e6ca5037932d59b4ac018ad834fe6b33fe", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43471", "pull_request:43640"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #43471 is the best overall representative of this cluster because it is the more general dependency-missing UX fix and is more self-contained than the PEFT-specific error refinement in #43640.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #43471 is the cleaner canonical PR because it is a focused user-facing error-message improvement with a small, self-contained change and tests.", "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": 
"pull_request:43471", "reason": "Different underlying problems: #43471 handles missing serving CLI dependencies (`pydantic`, `fastapi`, `uvicorn`, `openai`), while #43640 handles missing `peft` when loading PEFT adapters. They affect distinct code paths and are not mergeable as one PR.", "right": "pull_request:43640"}], "summary": "These are two unrelated PRs: one improves the serving CLI\u2019s missing-dependency error, and the other improves the PEFT-adapter error path when `peft` is absent. They touch different code paths and would not be merged as one change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43471|pull_request:43640"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2164, "estimated_input_tokens": 954, "item_count": 2, "node_count": 2, "serialized_chars": 3813, "soft_pair_count": 1}, "cached_at": "2026-04-14T18:05:30Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "f1a0f34d38e8a305d31f743395464c3dd6a29ab1247b40bf3c21a8e71c7beb2a", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43473", "pull_request:45031"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #45031 is the better representative because it is narrowly scoped, self-contained, and describes a concrete code-path fix (`CamembertForCausalLM` weight tying) more directly than the more environment-dependent Apertus loading patch.", "canonical_issue_reason": null, "canonical_pr_reason": "No clear duplicate PR exists in this cluster. 
If one must be chosen as the representative, PR #45031 is slightly cleaner as a targeted model-specific correctness fix with an explicit tied-weights mapping correction.", "confidence": 0.99, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43473", "reason": "Different models, different code paths, and different failure modes: Apertus loading/meta-tensor initialization versus Camembert tied-weights metadata. They are not the same underlying change and would not plausibly merge into one PR.", "right": "pull_request:45031"}], "summary": "The two pull requests are unrelated fixes: one addresses Apertus model loading by avoiding meta-tensor scalar extraction in `activations.py`, while the other fixes Camembert\u2019s tied-weights mapping in `_tied_weights_keys`. They do not share the same underlying bug or change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43473|pull_request:45031"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2586, "estimated_input_tokens": 1165, "item_count": 2, "node_count": 2, "serialized_chars": 4660, "soft_pair_count": 1}, "cached_at": "2026-04-14T18:05:43Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "5952edb7b983b862995327f387c0ffe07f5fa196c2230c20f41554ca6d33f1dd", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43501", "pull_request:44475"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #44475 is the best representative of the cluster: it is merged, narrow, and addresses one specific tokenizer auto-classification issue. 
PR #43501 is unrelated and not suitable as a cluster representative.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #44475 is the better canonical PR only because it is a small, merged, concrete fix with a clear code-path target; PR #43501 is a separate feature addition in a different model family.", "confidence": 0.98, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43501", "reason": "Different model families and different purposes: Qwen3 TTS tokenizer encoder vs Chameleon tokenizer-class blacklist. They do not share the same underlying bug/change and would not plausibly be merged as one PR.", "right": "pull_request:44475"}], "summary": "This cluster mixes two unrelated pull requests: one adds a Qwen3 TTS tokenizer encoder, the other adds Chameleon to a tokenizer auto-detection blacklist. They do not fix the same bug or implement the same change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43501|pull_request:44475"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 5298, "estimated_input_tokens": 2521, "item_count": 6, "node_count": 6, "serialized_chars": 10084, "soft_pair_count": 5}, "cached_at": "2026-04-14T18:06:11Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "cec4c276b280fd7318a8e58f41c7465016576ce353de55f248080abfa26930f3", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43515", "pull_request:43516", "pull_request:43753", "pull_request:43892", "pull_request:44948", "pull_request:45022"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 43515 is the best overall representative if one must 
choose a PR from this cluster, but it should not be treated as a duplicate of the others.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 43515 is the cleanest representative of the only near-duplicate pair because it is the minimal branch-scaffold change; 43516 adds an extra unrelated file, and the remaining PRs are about different subsystems.", "confidence": 0.95, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43515", "reason": "Very similar scaffolded branch-creation PRs, but 43516 adds an extra file and is not the same exact change; looks like a separate test commit rather than a duplicate.", "right": "pull_request:43516"}, {"accept": false, "left": "pull_request:43516", "reason": "Completely different changes: one is a toy branch-creation scaffold, the other bumps KERNELS_MIN_VERSION in import_utils.", "right": "pull_request:43892"}, {"accept": false, "left": "pull_request:43753", "reason": "Different goals and code paths: dependency version constant update versus a docs/config edit.", "right": "pull_request:45022"}, {"accept": false, "left": "pull_request:43892", "reason": "Different files and behavior: hub metadata ordering in utils/update_metadata.py versus docs/source/_config.py.", "right": "pull_request:45022"}, {"accept": false, "left": "pull_request:44948", "reason": "No shared underlying bug or change; aa.py creation is unrelated to the docs config tweak.", "right": "pull_request:45022"}], "summary": "No true duplicates here; the only close pair (43515/43516) is just a similar branch-scaffold/test commit, while the other PRs are unrelated code or docs changes."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43515|pull_request:43516", "pull_request:43516|pull_request:43892", "pull_request:43753|pull_request:45022", "pull_request:43892|pull_request:45022", "pull_request:44948|pull_request:45022"], "split": false, "trimmed": false} 
+{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2572, "estimated_input_tokens": 1158, "item_count": 2, "node_count": 2, "serialized_chars": 4630, "soft_pair_count": 1}, "cached_at": "2026-04-14T18:06:31Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "50ee2b5a6aefcd7ce8984bb2a5b724481ad5fd2aa33caca272d4b9b4aa2950a1", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43517", "pull_request:43574"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "pull_request:43517 is the clearest standalone PR in this cluster, but neither PR subsumes the other or appears mergeable into one change.", "canonical_issue_reason": null, "canonical_pr_reason": "pull_request:43517 is the better representative only because it is a self-contained Trainer fix with a clear, narrow behavior change; 43574 is a separate initialization/ZeRO-3 workaround for different models.", "confidence": 0.98, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43517", "reason": "Different code paths and bug reports: 43517 gates sequence-parallel loss computation on eval/training mode in Trainer; 43574 adds variance-scaling initialization helpers and applies them to SigLIP-related modules for ZeRO-3. No shared filenames, targets, or concrete fix overlap.", "right": "pull_request:43574"}], "summary": "Two merged pull requests that touch different subsystems and fix different bugs: one enables evaluation during sequence parallel in Trainer, the other changes initialization behavior for SigLIP under ZeRO-3. 
They are not duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43517|pull_request:43574"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2458, "estimated_input_tokens": 1101, "item_count": 2, "node_count": 2, "serialized_chars": 4404, "soft_pair_count": 1}, "cached_at": "2026-04-14T18:06:43Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "f4747430f2142da5f6455fdcb71d76ad26524f585d1bf097b1ebe7154ce39583", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43545", "pull_request:43547"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #43547 is the best representative because it addresses the concrete runtime behavior, not just test cleanup; PR #43545 is a narrower test-only change.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #43547 is the more substantive fix: it changes the FP8 integration/quantizer code path and updates the related test, so it best represents the underlying bug-fix work in this cluster.", "confidence": 0.91, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43545", "reason": "They share the fbgemm_fp8 test file, but #43545 removes a test and adjusts environment gating, while #43547 fixes the FP8 linear replacement/quantizer behavior. 
Different code paths and different underlying issues, so they should not be merged as duplicates.", "right": "pull_request:43547"}], "summary": "Both PRs are about the fbgemm_fp8 test area, but they fix different problems: one removes/adjusts a test class and availability gating, while the other changes the actual FP8 replacement/quantizer logic and test setup. They are related by subsystem, not the same change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43545|pull_request:43547"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 3828, "estimated_input_tokens": 1786, "item_count": 3, "node_count": 3, "serialized_chars": 7144, "soft_pair_count": 2}, "cached_at": "2026-04-14T18:07:00Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "14bf507c00f594d83379ca73214a0cc43cfa680b1a64ed7980bc2d49f918f81e", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43561", "pull_request:43754", "pull_request:45321"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "43561 is the most representative of the shared torchao integration theme, but only at a very high level; it does not subsume the SDPA workaround removal or the AffineQuantizedTensor reference cleanup.", "canonical_issue_reason": null, "canonical_pr_reason": "No single canonical PR emerges because these are separate fixes. 
If forced to pick a representative, 43561 is the broadest merged torchao cleanup, but it is not a duplicate of the others.", "confidence": 0.95, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43561", "reason": "Both touch torchao integration/tests, but 43561 removes autoquant support while 45321 only changes how quantized weights are described in __repr__/extra_repr. Different change goals and code paths.", "right": "pull_request:45321"}, {"accept": false, "left": "pull_request:43561", "reason": "43561 is about removing torchao.autoquant APIs and docs; 43754 removes SDPA contiguity workarounds for newer torch versions. Same general cleanup flavor, but unrelated fixes.", "right": "pull_request:43754"}], "summary": "All three PRs are related to Transformers cleanup/integration work, but they target different code paths and bug classes: torchao autoquant removal, SDPA workaround removal, and AffineQuantizedTensor reference cleanup. They should not be clustered as duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43561|pull_request:45321", "pull_request:43561|pull_request:43754"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 21646, "estimated_input_tokens": 10695, "item_count": 18, "node_count": 18, "serialized_chars": 42780, "soft_pair_count": 24}, "cached_at": "2026-04-14T18:08:17Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "a35d2b49c929daa3a28b4b26060c5e344290ec499f549af62aecd862ae1502b6", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43983", "pull_request:43996", "pull_request:44007", "pull_request:44011", 
"pull_request:44013", "pull_request:44020", "pull_request:44022", "pull_request:44023", "pull_request:44039", "pull_request:44044", "pull_request:44066", "pull_request:44072", "pull_request:44085", "pull_request:44089", "pull_request:44091", "pull_request:44093", "pull_request:44129", "pull_request:44722"], "result": {"analyst_result": {"best_issue_reason": "Issue #43979 is the umbrella tracking issue referenced by every PR in the cluster, so it is the natural canonical issue for the broader refactor effort.", "best_pr_reason": "#43983 is the strongest representative PR because it is merged, directly implements the new tracing approach, and is the clearest upstream anchor for the rest of the series.", "canonical_issue_reason": null, "canonical_pr_reason": "#43983 is the best anchor PR: it is already merged, explicitly targets the common tracking issue, and establishes the standardized output-capture pattern that the later model-specific refactors build on.", "confidence": 0.87, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44011", "reason": "Different model code paths (Swin vs DeBERTa). Same umbrella issue, but not the same concrete change.", "right": "pull_request:44020"}, {"accept": false, "left": "pull_request:44011", "reason": "Different model implementations (Swin vs DeBERTa). 
They are thematically similar but not the same bug/change.", "right": "pull_request:44093"}, {"accept": false, "left": "pull_request:44020", "reason": "Different model families and unrelated files; both are output-tracing refactors but not the same code-path fix.", "right": "pull_request:44091"}, {"accept": false, "left": "pull_request:44091", "reason": "Swin vs DeBERTa output-tracing changes are separate model-specific refactors, not duplicates.", "right": "pull_request:44093"}, {"accept": false, "left": "pull_request:44011", "reason": "Swin and Nystromformer are distinct implementations; same issue target does not make these the same change.", "right": "pull_request:44023"}, {"accept": false, "left": "pull_request:44023", "reason": "Different models and different forward paths; only the umbrella refactor theme is shared.", "right": "pull_request:44091"}, {"accept": false, "left": "pull_request:44022", "reason": "ConvBERT and Swin are separate model refactors with no shared concrete code path.", "right": "pull_request:44091"}, {"accept": false, "left": "pull_request:44011", "reason": "Swin output tracing and GPTJ/CodeGen output tracing are different model-specific changes.", "right": "pull_request:44722"}, {"accept": false, "left": "pull_request:44091", "reason": "Different models and different files; both fit the same theme but not the same fix.", "right": "pull_request:44722"}, {"accept": true, "left": "pull_request:44085", "reason": "Both touch GPTJ output-tracing in the same file/code path, and #44722 looks like a broader rework that could plausibly subsume the same GPTJ change.", "right": "pull_request:44722"}, {"accept": false, "left": "pull_request:44020", "reason": "DeBERTa and GPTJ are unrelated model implementations; the shared issue target is too broad.", "right": "pull_request:44085"}, {"accept": false, "left": "pull_request:44085", "reason": "GPTJ and DeBERTa are different concrete code paths, so these are not duplicates.", "right": 
"pull_request:44093"}, {"accept": false, "left": "pull_request:44089", "reason": "SqueezeBERT/T5 output-tracing work is unrelated to DeBERTa output-tracing work.", "right": "pull_request:44093"}, {"accept": false, "left": "pull_request:44085", "reason": "Different models and different model files; same refactor style only.", "right": "pull_request:44089"}, {"accept": false, "left": "pull_request:44020", "reason": "DeBERTa and SpeechT5 are separate model-specific changes, not one mergeable PR.", "right": "pull_request:44129"}, {"accept": false, "left": "pull_request:43996", "reason": "CVT/FNet versus GPTJ are unrelated implementations; only the umbrella tracing effort matches.", "right": "pull_request:44085"}, {"accept": false, "left": "pull_request:43996", "reason": "Different model families and files; no shared concrete bug/change.", "right": "pull_request:44044"}, {"accept": false, "left": "pull_request:44007", "reason": "These affect different model stacks (RegNet/ResNet/RT-DETR vs EfficientNet), so they are not the same change.", "right": "pull_request:44072"}, {"accept": false, "left": "pull_request:44011", "reason": "Swin and GPTJ are distinct model-specific refactors.", "right": "pull_request:44066"}, {"accept": false, "left": "pull_request:43996", "reason": "Different model families and different code paths; same issue target only indicates they belong to the broader refactor series.", "right": "pull_request:44091"}, {"accept": false, "left": "pull_request:44089", "reason": "SqueezeBERT/T5 and Swin are unrelated implementations.", "right": "pull_request:44091"}, {"accept": false, "left": "pull_request:44039", "reason": "The AI-fix placeholder is not a concrete duplicate of the GPTJ refactor PR.", "right": "pull_request:44085"}, {"accept": false, "left": "pull_request:43983", "reason": "DecisionTransformer/GPT2 output capture and DeBERTaV2 output tracing are separate model-specific refactors.", "right": "pull_request:44044"}, {"accept": false, "left": 
"pull_request:44013", "reason": "MobileNetV2 and DeBERTaV2 changes are unrelated beyond the umbrella tracing theme.", "right": "pull_request:44044"}], "summary": "This cluster is mostly a set of model-specific PRs all tied to the same umbrella output-tracing refactor, but they are not duplicates of one another except for a couple of same-model follow-ups/rebases. The shared issue target alone is not enough to merge most pairs."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44011|pull_request:44020", "pull_request:44011|pull_request:44093", "pull_request:44020|pull_request:44091", "pull_request:44091|pull_request:44093", "pull_request:44011|pull_request:44023", "pull_request:44023|pull_request:44091", "pull_request:44022|pull_request:44091", "pull_request:44011|pull_request:44722", "pull_request:44091|pull_request:44722", "pull_request:44085|pull_request:44722", "pull_request:44020|pull_request:44085", "pull_request:44085|pull_request:44093", "pull_request:44089|pull_request:44093", "pull_request:44085|pull_request:44089", "pull_request:44020|pull_request:44129", "pull_request:43996|pull_request:44085", "pull_request:43996|pull_request:44044", "pull_request:44007|pull_request:44072", "pull_request:44011|pull_request:44066", "pull_request:43996|pull_request:44091", "pull_request:44089|pull_request:44091", "pull_request:44039|pull_request:44085", "pull_request:43983|pull_request:44044", "pull_request:44013|pull_request:44044"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 20006, "estimated_input_tokens": 9875, "item_count": 18, "node_count": 18, "serialized_chars": 39497, "soft_pair_count": 16}, "cached_at": "2026-04-14T18:09:15Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": 
"0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "65730f90934b038f9e0601b19cd130b8e404fe21beb2649bd3fb21dc60da0b54", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44011", "pull_request:44018", "pull_request:44019", "pull_request:44059", "pull_request:44066", "pull_request:44068", "pull_request:44071", "pull_request:44085", "pull_request:44086", "pull_request:44088", "pull_request:44091", "pull_request:44102", "pull_request:44104", "pull_request:44105", "pull_request:44139", "pull_request:44140", "pull_request:44141", "pull_request:44335"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "44091 is the strongest single PR example because it is self-contained and concrete, but it should only represent the Swin subgroup, not the whole cluster of model-specific refactors.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 44091 is the best representative of the cluster: it is a complete, closed Swin output-tracing refactor, explicitly tied to issue 43979, and includes tests. 
The rest are model-specific instances of the same broader migration pattern.", "confidence": 0.92, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:44059", "reason": "Same model file (GPT-2) and same output-tracing refactor; these look like duplicate attempts at the same code-path change.", "right": "pull_request:44088"}, {"accept": false, "left": "pull_request:44066", "reason": "Different models and code paths (GPT-J/codegen vs MGP-STR); only the decorator theme is shared.", "right": "pull_request:44086"}, {"accept": false, "left": "pull_request:44066", "reason": "GPT-J/codegen vs GPT-2 are separate model implementations, so this is not the same underlying change.", "right": "pull_request:44088"}, {"accept": true, "left": "pull_request:44139", "reason": "Same LiLT file and essentially the same output-tracing migration; this is a clear duplicate pair.", "right": "pull_request:44141"}, {"accept": true, "left": "pull_request:44018", "reason": "Both target GPT-Neo output tracing in the same model file and test file, so they appear to be the same change.", "right": "pull_request:44068"}, {"accept": false, "left": "pull_request:44066", "reason": "Different model families (GPT-J/codegen vs MPT) with no shared code path; same refactor style is not enough.", "right": "pull_request:44071"}, {"accept": false, "left": "pull_request:44105", "reason": "LiLT and IBERT are different models; they share a migration pattern but not the same bug/change.", "right": "pull_request:44141"}, {"accept": false, "left": "pull_request:44140", "reason": "Megatron-BERT and IBERT are separate implementations, so this is only a thematic match, not a duplicate.", "right": "pull_request:44141"}, {"accept": false, "left": "pull_request:44102", "reason": "IBERT vs LiLT are different model code paths; similar decorator refactor does not make them duplicates.", "right": "pull_request:44105"}, {"accept": false, "left": "pull_request:44011", "reason": "Swin vs ResNet are different models and 
different fixes; shared output-tracing refactor theme is too broad.", "right": "pull_request:44019"}, {"accept": false, "left": "pull_request:44019", "reason": "ResNet and Swin are unrelated model implementations, so this is not the same underlying change.", "right": "pull_request:44091"}, {"accept": false, "left": "pull_request:44102", "reason": "IBERT and LiLT are distinct code paths; same refactor pattern is not enough to merge them.", "right": "pull_request:44139"}, {"accept": false, "left": "pull_request:44104", "reason": "Megatron-BERT and IBERT are different models; these are separate refactors rather than duplicates.", "right": "pull_request:44141"}, {"accept": false, "left": "pull_request:44011", "reason": "Swin vs IBERT are different model implementations, so they should stay separate.", "right": "pull_request:44102"}, {"accept": false, "left": "pull_request:44091", "reason": "Swin and IBERT do not share the same concrete code path, only the same decorator migration idea.", "right": "pull_request:44102"}, {"accept": false, "left": "pull_request:44085", "reason": "GPT-J and RoFormer are different models; same broad refactor style does not indicate a duplicate.", "right": "pull_request:44335"}], "summary": "This cluster is mostly a repeated refactor pattern: migrating individual model files to standardized output tracing decorators. 
The only true duplicates are same-model PRs that touch the same concrete code paths; cross-model similarities are just the shared refactor pattern."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44059|pull_request:44088", "pull_request:44066|pull_request:44086", "pull_request:44066|pull_request:44088", "pull_request:44139|pull_request:44141", "pull_request:44018|pull_request:44068", "pull_request:44066|pull_request:44071", "pull_request:44105|pull_request:44141", "pull_request:44140|pull_request:44141", "pull_request:44102|pull_request:44105", "pull_request:44011|pull_request:44019", "pull_request:44019|pull_request:44091", "pull_request:44102|pull_request:44139", "pull_request:44104|pull_request:44141", "pull_request:44011|pull_request:44102", "pull_request:44091|pull_request:44102", "pull_request:44085|pull_request:44335"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 20596, "estimated_input_tokens": 10170, "item_count": 18, "node_count": 18, "serialized_chars": 40678, "soft_pair_count": 19}, "cached_at": "2026-04-14T18:10:27Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "7d5e0a5d990ffbad29680151c248d30b6a263abb07220362e33fad8abc4906ee", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43590", "pull_request:44011", "pull_request:44018", "pull_request:44019", "pull_request:44020", "pull_request:44046", "pull_request:44065", "pull_request:44066", "pull_request:44068", "pull_request:44084", "pull_request:44086", "pull_request:44087", "pull_request:44088", "pull_request:44091", "pull_request:44093", "pull_request:44094", "pull_request:44335", 
"pull_request:44722"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "#43590 is the strongest global representative because it is already merged and spans the same standardized output-tracing cleanup across many model implementations, whereas the others are narrower per-model follow-ups.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #43590 is the broad umbrella change: merged, high-coverage, and directly about removing traced outputs across 100+ models. It is the best canonical anchor for this refactor family.", "confidence": 0.86, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44019", "reason": "Different models (ResNet vs CTRL) and different code paths; both are just instances of the same refactor theme, not the same concrete change.", "right": "pull_request:44065"}, {"accept": false, "left": "pull_request:44019", "reason": "ResNet output tracing vs DeBERTa output tracing are separate model-specific changes.", "right": "pull_request:44020"}, {"accept": false, "left": "pull_request:44019", "reason": "Same refactor theme, but ResNet and DeBERTa are unrelated implementation paths.", "right": "pull_request:44093"}, {"accept": false, "left": "pull_request:43590", "reason": "#43590 is a broad umbrella sweep across many models; #44066 is a later, narrower GPT-J/CodeGen-specific refactor, so they are not the same PR-level change.", "right": "pull_request:44066"}, {"accept": false, "left": "pull_request:44086", "reason": "MGP-STR and GPT-J/CodeGen are different model implementations; this is similarity of pattern, not the same code-path fix.", "right": "pull_request:44722"}, {"accept": false, "left": "pull_request:44088", "reason": "GPT-2 and RoFormer output tracing are separate model-specific refactors.", "right": "pull_request:44335"}, {"accept": false, "left": "pull_request:44088", "reason": "GPT-2 vs GPT-J/CodeGen are distinct model files and changes; same refactor family is not enough to merge them.", 
"right": "pull_request:44722"}, {"accept": false, "left": "pull_request:44065", "reason": "CTRL and GPT-2 are different models and the diffs touch different attention implementations.", "right": "pull_request:44088"}, {"accept": false, "left": "pull_request:44084", "reason": "GPT-J and MGP-STR are unrelated model code paths.", "right": "pull_request:44086"}, {"accept": false, "left": "pull_request:44068", "reason": "GPT-Neo and GPT-2 are separate model implementations; this is not the same concrete bug/change.", "right": "pull_request:44088"}, {"accept": false, "left": "pull_request:44065", "reason": "CTRL and Mamba2 are unrelated model code paths, despite both being output-tracing refactors.", "right": "pull_request:44087"}, {"accept": false, "left": "pull_request:44084", "reason": "GPT-J and GPT-2 are different models; broad tracing cleanup does not make them the same PR.", "right": "pull_request:44088"}, {"accept": true, "left": "pull_request:44046", "reason": "Both touch CodeGen output tracing, and #44066 appears to include the same CodeGen hunk plus extra GPT-J changes, so these are overlapping versions of the same change.", "right": "pull_request:44066"}, {"accept": true, "left": "pull_request:44066", "reason": "Both include the same CodeGen output-tracing refactor; #44066 looks like a superset/combined variant of the CodeGen change present in #44094.", "right": "pull_request:44094"}, {"accept": false, "left": "pull_request:44018", "reason": "GPT-Neo and DeBERTa are unrelated model files and separate refactors.", "right": "pull_request:44020"}, {"accept": false, "left": "pull_request:44018", "reason": "GPT-Neo vs DeBERTa are distinct model-specific code paths.", "right": "pull_request:44093"}, {"accept": false, "left": "pull_request:44087", "reason": "Mamba2 and GPT-J/CodeGen are different model implementations; not the same underlying fix.", "right": "pull_request:44722"}, {"accept": false, "left": "pull_request:44011", "reason": "Swin and GPT-Neo are 
unrelated model refactors.", "right": "pull_request:44018"}, {"accept": false, "left": "pull_request:44018", "reason": "GPT-Neo and Swin are separate models; same tracing pattern does not justify merging.", "right": "pull_request:44091"}], "summary": "This cluster is mostly a sequence of model-specific refactors to standardize output tracing / remove output_attentions, not a single bug report. A few PRs are near-duplicate or superset/subset versions for the same model file, but most pairings are different models and should stay separate."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44019|pull_request:44065", "pull_request:44019|pull_request:44020", "pull_request:44019|pull_request:44093", "pull_request:43590|pull_request:44066", "pull_request:44086|pull_request:44722", "pull_request:44088|pull_request:44335", "pull_request:44088|pull_request:44722", "pull_request:44065|pull_request:44088", "pull_request:44084|pull_request:44086", "pull_request:44068|pull_request:44088", "pull_request:44065|pull_request:44087", "pull_request:44084|pull_request:44088", "pull_request:44046|pull_request:44066", "pull_request:44066|pull_request:44094", "pull_request:44018|pull_request:44020", "pull_request:44018|pull_request:44093", "pull_request:44087|pull_request:44722", "pull_request:44011|pull_request:44018", "pull_request:44018|pull_request:44091"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 20386, "estimated_input_tokens": 10065, "item_count": 18, "node_count": 18, "serialized_chars": 40258, "soft_pair_count": 18}, "cached_at": "2026-04-14T18:11:20Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", 
"prepared_review_unit_hash": "b58315275537458887a42dd4c33e4f379abaee265d388e04f8dbfdc67d6e30c3", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44011", "pull_request:44018", "pull_request:44020", "pull_request:44046", "pull_request:44047", "pull_request:44049", "pull_request:44065", "pull_request:44068", "pull_request:44071", "pull_request:44084", "pull_request:44085", "pull_request:44087", "pull_request:44091", "pull_request:44094", "pull_request:44098", "pull_request:44101", "pull_request:44116", "pull_request:44722"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 44011 is the best overall representative because it is the most settled/traceable exemplar of the standardized output-tracing refactor work: closed, issue-linked, and narrowly scoped to one model.", "canonical_issue_reason": null, "canonical_pr_reason": "Pick PR 44011 as the canonical representative: it is closed, has an explicit issue target, and is a clean example of the Swin output-tracing refactor pattern in this batch.", "confidence": 0.84, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44049", "reason": "Different models and code paths (FNet vs GPT-J); same refactor theme only, not the same underlying change.", "right": "pull_request:44084"}, {"accept": false, "left": "pull_request:44018", "reason": "GPT-Neo and Flaubert are unrelated model implementations; this is not one shared bug or patch.", "right": "pull_request:44116"}, {"accept": false, "left": "pull_request:44068", "reason": "Different model families and forward paths; only the decorator-refactor motif overlaps.", "right": "pull_request:44116"}, {"accept": false, "left": "pull_request:44047", "reason": "Bloom vs FNet are separate model implementations; not the same concrete fix.", "right": "pull_request:44049"}, {"accept": false, "left": "pull_request:44065", "reason": "CTRL and MPT touch different attention blocks and model code; too broad to merge as one PR.", 
"right": "pull_request:44071"}, {"accept": false, "left": "pull_request:44065", "reason": "CTRL vs GPT-J are different model paths; same modernization theme but not the same underlying change.", "right": "pull_request:44722"}, {"accept": false, "left": "pull_request:44071", "reason": "MPT refactor and Mamba2/hub_kernels change are distinct changes with different affected code paths.", "right": "pull_request:44087"}, {"accept": false, "left": "pull_request:44011", "reason": "Swin vs GPT-Neo are different model implementations; not the same bug or change.", "right": "pull_request:44068"}, {"accept": false, "left": "pull_request:44068", "reason": "GPT-Neo vs Swin; these are separate model-specific refactors, not one mergeable PR.", "right": "pull_request:44091"}, {"accept": false, "left": "pull_request:44068", "reason": "GPT-Neo and GPT-J are distinct code paths; they share a pattern, not a single underlying fix.", "right": "pull_request:44722"}, {"accept": false, "left": "pull_request:44049", "reason": "FNet vs GPT-J; unrelated models, so this is not the same concrete change.", "right": "pull_request:44085"}, {"accept": false, "left": "pull_request:44011", "reason": "Swin and CTRL are different model implementations; only the standardized-refactor theme is shared.", "right": "pull_request:44065"}, {"accept": false, "left": "pull_request:44065", "reason": "CTRL vs Swin are unrelated model code paths; not a duplicate change.", "right": "pull_request:44091"}, {"accept": false, "left": "pull_request:44046", "reason": "CodeGen and GPT-J are different model implementations; same output-tracing refactor family, but not one patch.", "right": "pull_request:44084"}, {"accept": false, "left": "pull_request:44084", "reason": "GPT-J vs CodeGen are separate model code paths; not the same underlying bug or change.", "right": "pull_request:44094"}, {"accept": false, "left": "pull_request:44020", "reason": "DeBERTa and CodeGen are different models with different attention 
implementations; not a duplicate.", "right": "pull_request:44046"}, {"accept": false, "left": "pull_request:44020", "reason": "DeBERTa vs CodeGen; shared refactor style only, no shared concrete fix.", "right": "pull_request:44094"}, {"accept": false, "left": "pull_request:44098", "reason": "ViLT and Flaubert/XLM are different model stacks and files; not the same change.", "right": "pull_request:44101"}], "summary": "This cluster is mostly a batch of model-specific refactors to the new output-tracing decorator pattern, not a single duplicate bug/change. The soft-edge pairs are cross-model and should be rejected; they share the same refactor theme but not the same concrete code path. There are a few near-identical PRs in the wider set, but they are not the provided soft pairs."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44049|pull_request:44084", "pull_request:44018|pull_request:44116", "pull_request:44068|pull_request:44116", "pull_request:44047|pull_request:44049", "pull_request:44065|pull_request:44071", "pull_request:44065|pull_request:44722", "pull_request:44071|pull_request:44087", "pull_request:44011|pull_request:44068", "pull_request:44068|pull_request:44091", "pull_request:44068|pull_request:44722", "pull_request:44049|pull_request:44085", "pull_request:44011|pull_request:44065", "pull_request:44065|pull_request:44091", "pull_request:44046|pull_request:44084", "pull_request:44084|pull_request:44094", "pull_request:44020|pull_request:44046", "pull_request:44020|pull_request:44094", "pull_request:44098|pull_request:44101"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 19850, "estimated_input_tokens": 9797, "item_count": 18, "node_count": 18, "serialized_chars": 39187, "soft_pair_count": 15}, "cached_at": "2026-04-14T18:11:56Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", 
"evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "dea674857a82129c3089d0ba71d47be0c1b040c1c7406948fab3770e6f27f195", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44017", "pull_request:44018", "pull_request:44019", "pull_request:44022", "pull_request:44023", "pull_request:44049", "pull_request:44059", "pull_request:44068", "pull_request:44071", "pull_request:44073", "pull_request:44084", "pull_request:44085", "pull_request:44086", "pull_request:44087", "pull_request:44088", "pull_request:44161", "pull_request:44335", "pull_request:44722"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #44084 is the best global representative because it has explicit issue linkage, a focused implementation, and sits squarely in the common output-tracing refactor stream without mixing in unrelated changes.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #44084 is the cleanest representative of the shared theme: it is issue-linked to #43979, focused on a single concrete model refactor, and the diff aligns directly with the standardized output-tracing change.", "confidence": 0.88, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44335", "reason": "Different models and change scopes: RoFormer output-tracing refactor vs GPT-J plus CodeGen-related edits. Same broad pattern, but not the same underlying bug/change.", "right": "pull_request:44722"}, {"accept": false, "left": "pull_request:44068", "reason": "GPT-Neo refactor vs MGP-STR refactor. Both use the new tracing decorators, but they affect different model code paths and are not mergeable as one PR.", "right": "pull_request:44086"}, {"accept": false, "left": "pull_request:44023", "reason": "Nystromformer and MPT are separate model implementations. 
These are parallel refactors, not one concrete fix to the same code path.", "right": "pull_request:44071"}, {"accept": false, "left": "pull_request:44059", "reason": "GPT-2 output-tracing refactor is unrelated to MGP-STR. Same refactor family, different underlying model behavior.", "right": "pull_request:44086"}, {"accept": false, "left": "pull_request:44059", "reason": "GPT-2 vs GPT-J/CodeGen are distinct model changes. They share a tooling pattern but not the same bug or patch.", "right": "pull_request:44722"}, {"accept": false, "left": "pull_request:44084", "reason": "GPT-J output tracing and RoFormer output tracing are separate model-specific refactors, not one shared fix.", "right": "pull_request:44335"}, {"accept": false, "left": "pull_request:44018", "reason": "GPT-Neo and RoFormer are unrelated model code paths; these PRs are only similar in that they both adopt the same decorator pattern.", "right": "pull_request:44335"}, {"accept": false, "left": "pull_request:44023", "reason": "Nystromformer vs MGP-STR affects different architectures and different forward implementations.", "right": "pull_request:44086"}, {"accept": false, "left": "pull_request:44022", "reason": "ConvBERT and MPT are separate models with independent refactors; no shared concrete defect or change.", "right": "pull_request:44071"}, {"accept": false, "left": "pull_request:44017", "reason": "SegFormer and LongT5 are unrelated models; the overlap is only the generalized output-tracing migration.", "right": "pull_request:44161"}, {"accept": false, "left": "pull_request:44017", "reason": "SegFormer and VisualBERT are different architectures with distinct code paths, so this is not a duplicate change.", "right": "pull_request:44073"}, {"accept": false, "left": "pull_request:44087", "reason": "Mamba2 includes a hub-kernels version-handling fix in addition to output tracing, while GPT-2 is only a tracing refactor. 
Different underlying changes.", "right": "pull_request:44088"}, {"accept": false, "left": "pull_request:44022", "reason": "ConvBERT and MGP-STR are separate model refactors; same broad modernization, different concrete implementations.", "right": "pull_request:44086"}, {"accept": false, "left": "pull_request:44019", "reason": "ResNet and FNet are different model families. These are parallel decorator migrations, not the same bug or PR.", "right": "pull_request:44049"}, {"accept": false, "left": "pull_request:44071", "reason": "MPT and GPT-J are distinct model implementations; the refactors are similar in style but not the same code-path fix.", "right": "pull_request:44085"}], "summary": "This cluster is mostly a set of model-specific PRs refactoring output tracing to the new capture_outputs/can_return_tuple pattern. They share a common refactor theme, but each PR targets a different model or even unrelated code, so they are not duplicates of one another."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44335|pull_request:44722", "pull_request:44068|pull_request:44086", "pull_request:44023|pull_request:44071", "pull_request:44059|pull_request:44086", "pull_request:44059|pull_request:44722", "pull_request:44084|pull_request:44335", "pull_request:44018|pull_request:44335", "pull_request:44023|pull_request:44086", "pull_request:44022|pull_request:44071", "pull_request:44017|pull_request:44161", "pull_request:44017|pull_request:44073", "pull_request:44087|pull_request:44088", "pull_request:44022|pull_request:44086", "pull_request:44019|pull_request:44049", "pull_request:44071|pull_request:44085"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 19722, "estimated_input_tokens": 9733, "item_count": 18, "node_count": 18, "serialized_chars": 38931, "soft_pair_count": 14}, "cached_at": "2026-04-14T18:12:44Z", "key": 
{"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "69cececd16b726343e3529db890e5769e5b706d514292c61fdddf71f097a0ecc", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43997", "pull_request:44011", "pull_request:44012", "pull_request:44017", "pull_request:44020", "pull_request:44047", "pull_request:44056", "pull_request:44071", "pull_request:44073", "pull_request:44084", "pull_request:44086", "pull_request:44087", "pull_request:44091", "pull_request:44092", "pull_request:44101", "pull_request:44161", "pull_request:44335", "pull_request:44722"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 44011 is the best overall representative because it is narrowly scoped, well-linked to the tracking issue, and its change is concrete and self-contained; the other PRs are parallel per-model refactors rather than the same fix.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 44011 is the cleanest representative: it has an explicit issue target (#43979), a focused single-model refactor, and matches the duplicated Swin-style output-tracing change pattern more clearly than the broader or less explicit PRs.", "confidence": 0.93, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44011", "reason": "Different models and code paths (Swin vs MGP-STR); same refactor pattern, but not the same underlying change.", "right": "pull_request:44086"}, {"accept": false, "left": "pull_request:44017", "reason": "Different architectures (SegFormer vs GPT-J/CodeGen) and different files; only share the broad output-tracing theme.", "right": "pull_request:44722"}, {"accept": false, "left": "pull_request:44047", "reason": "Bloom and MPT are separate model implementations with distinct forward logic; not 
mergeable as one PR.", "right": "pull_request:44071"}, {"accept": false, "left": "pull_request:44020", "reason": "DeBERTa vs Mamba2 are unrelated code paths; this is only a shared decorator/refactor pattern.", "right": "pull_request:44087"}, {"accept": false, "left": "pull_request:43997", "reason": "RegNet and VisualBERT are different model families with different output shapes and tracing sites.", "right": "pull_request:44073"}, {"accept": false, "left": "pull_request:44073", "reason": "VisualBERT and XLM/Flaubert touch different model stacks and different attention/output plumbing.", "right": "pull_request:44101"}, {"accept": false, "left": "pull_request:44011", "reason": "Swin and SwinV2 are similar families but distinct implementations; the edits are model-specific, not one shared fix.", "right": "pull_request:44012"}, {"accept": false, "left": "pull_request:44011", "reason": "Different models (Swin vs SwinV2) and separate tracing code; too broad to deduplicate.", "right": "pull_request:44092"}, {"accept": false, "left": "pull_request:44012", "reason": "SwinV2 vs Swin are not the same underlying bug/change, despite similar refactor wording.", "right": "pull_request:44091"}, {"accept": false, "left": "pull_request:44091", "reason": "Same refactor theme, but Swin and SwinV2 remain distinct code paths and should not be merged.", "right": "pull_request:44092"}, {"accept": false, "left": "pull_request:44017", "reason": "SegFormer and RoFormer changes are separate model-specific output-tracing refactors.", "right": "pull_request:44335"}, {"accept": false, "left": "pull_request:44047", "reason": "Bloom and GPT-J are distinct model implementations; these are parallel refactors, not duplicates.", "right": "pull_request:44084"}, {"accept": false, "left": "pull_request:44056", "reason": "MPNet and XLM/Flaubert affect different model files and attention/output logic.", "right": "pull_request:44101"}, {"accept": false, "left": "pull_request:44073", "reason": "VisualBERT and 
LongT5 are unrelated model code paths; same architecture motif is insufficient for acceptance.", "right": "pull_request:44161"}], "summary": "These are mostly related but model-specific output-tracing refactors, not true duplicates. The only plausible deduplication signal is within the Swin/Swinv2 pairings, but those still target different model code paths, so they should stay separate."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44011|pull_request:44086", "pull_request:44017|pull_request:44722", "pull_request:44047|pull_request:44071", "pull_request:44020|pull_request:44087", "pull_request:43997|pull_request:44073", "pull_request:44073|pull_request:44101", "pull_request:44011|pull_request:44012", "pull_request:44011|pull_request:44092", "pull_request:44012|pull_request:44091", "pull_request:44091|pull_request:44092", "pull_request:44017|pull_request:44335", "pull_request:44047|pull_request:44084", "pull_request:44056|pull_request:44101", "pull_request:44073|pull_request:44161"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 19838, "estimated_input_tokens": 9791, "item_count": 18, "node_count": 18, "serialized_chars": 39164, "soft_pair_count": 15}, "cached_at": "2026-04-14T18:13:15Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "2ce6b34b3e246a3e9789629b91ada63c654131c13b5e8a9f0c18407d008ac6ac", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43997", "pull_request:44007", "pull_request:44012", "pull_request:44017", "pull_request:44022", "pull_request:44046", "pull_request:44049", "pull_request:44056", "pull_request:44059", "pull_request:44065", 
"pull_request:44071", "pull_request:44076", "pull_request:44092", "pull_request:44101", "pull_request:44161", "pull_request:44333", "pull_request:44335", "pull_request:45365"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 44007 is the strongest cluster anchor because it covers several backbone models and directly implements the common output-tracing refactor pattern that the others are following independently.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 44007 is the best representative of the shared refactor theme: it has an explicit issue link, touches multiple related model files, and appears to be the broader architectural change in this batch.", "confidence": 0.93, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44059", "reason": "Different models (GPT-2 vs CTRL) and different forward paths; same decorator theme is not enough to treat them as one change.", "right": "pull_request:44065"}, {"accept": false, "left": "pull_request:44017", "reason": "SegFormer and MPT are unrelated codebases; these are separate refactors, not the same bug or patch.", "right": "pull_request:44071"}, {"accept": false, "left": "pull_request:44059", "reason": "GPT-2 and RoFormer are distinct model implementations with different attention/output logic.", "right": "pull_request:44335"}, {"accept": false, "left": "pull_request:44101", "reason": "XLM/Flaubert vs LongT5 are unrelated models; shared output-handling cleanup does not imply duplicate PRs.", "right": "pull_request:44161"}, {"accept": false, "left": "pull_request:44012", "reason": "Swinv2 and ConvBERT are separate model families; these are independent decorator migrations.", "right": "pull_request:44022"}, {"accept": false, "left": "pull_request:44022", "reason": "ConvBERT and Swinv2 changes are not the same underlying fix, just the same refactor pattern.", "right": "pull_request:44092"}, {"accept": false, "left": "pull_request:44012", "reason": "Swinv2 
attention/output tracing work and CodeGen tracing work affect different model internals and can\u2019t plausibly be merged as one PR.", "right": "pull_request:44046"}, {"accept": false, "left": "pull_request:44046", "reason": "CodeGen and Swinv2 are unrelated architectures; no shared concrete bug or code path.", "right": "pull_request:44092"}, {"accept": false, "left": "pull_request:43997", "reason": "RegNet and MPNet are different models with different output-capture plumbing.", "right": "pull_request:44056"}, {"accept": false, "left": "pull_request:44065", "reason": "CTRL and Flaubert/XLM are separate model implementations; same output-tracing motif only.", "right": "pull_request:44101"}, {"accept": false, "left": "pull_request:44049", "reason": "FNet and ALBERT are unrelated model families; these are not duplicate fixes.", "right": "pull_request:44333"}, {"accept": false, "left": "pull_request:44071", "reason": "MPT and ImageGPT touch different forward implementations and different output semantics.", "right": "pull_request:44076"}, {"accept": false, "left": "pull_request:44056", "reason": "MPNet and LongT5 are unrelated models; no shared concrete change beyond generic tracing cleanup.", "right": "pull_request:44161"}, {"accept": false, "left": "pull_request:43997", "reason": "RegNet and LongT5 are distinct architectures; the PRs address different code paths.", "right": "pull_request:44161"}, {"accept": false, "left": "pull_request:44007", "reason": "ResNet/RegNet tracing refactor and GPT-J tracing refactor are different model-specific changes, not one mergeable PR.", "right": "pull_request:45365"}], "summary": "This cluster is a set of model-specific refactors around standardized output tracing/capture decorators, but the PRs target different architectures and code paths. 
They are related thematically, not duplicates, so I reject all soft-edge merges."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44059|pull_request:44065", "pull_request:44017|pull_request:44071", "pull_request:44059|pull_request:44335", "pull_request:44101|pull_request:44161", "pull_request:44012|pull_request:44022", "pull_request:44022|pull_request:44092", "pull_request:44012|pull_request:44046", "pull_request:44046|pull_request:44092", "pull_request:43997|pull_request:44056", "pull_request:44065|pull_request:44101", "pull_request:44049|pull_request:44333", "pull_request:44071|pull_request:44076", "pull_request:44056|pull_request:44161", "pull_request:43997|pull_request:44161", "pull_request:44007|pull_request:45365"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 20306, "estimated_input_tokens": 10025, "item_count": 18, "node_count": 18, "serialized_chars": 40100, "soft_pair_count": 17}, "cached_at": "2026-04-14T18:13:48Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "de4a6f0d66bb65331a9c73959dd1fe03f03e08d095910546f2c43fc6d6a1aac7", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43996", "pull_request:43997", "pull_request:44010", "pull_request:44011", "pull_request:44018", "pull_request:44044", "pull_request:44047", "pull_request:44049", "pull_request:44071", "pull_request:44073", "pull_request:44074", "pull_request:44089", "pull_request:44091", "pull_request:44101", "pull_request:44129", "pull_request:44147", "pull_request:44161", "pull_request:44333"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #44161 is the 
strongest single representative of the broader cluster theme: it is explicit about the standardized output-capture refactor and issue target, with a substantial model implementation change.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #44091 is the cleanest representative of the only real duplicate pair: it matches #44011 on the same Swin files and the same issue target, and looks like the later, more complete revision.", "confidence": 0.94, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44074", "reason": "Both are output-tracing refactors, but they touch different models and different code paths; not the same underlying change.", "right": "pull_request:44161"}, {"accept": false, "left": "pull_request:44147", "reason": "Different models and different refactor scopes; shared decorator usage is too broad to be a duplicate.", "right": "pull_request:44161"}, {"accept": false, "left": "pull_request:43996", "reason": "Same general migration pattern, but CVT/FNet are separate implementations and these are distinct PRs.", "right": "pull_request:44049"}, {"accept": false, "left": "pull_request:44047", "reason": "Bloom refactor vs. 
ALBERT named-attribute cleanup: unrelated model changes, not the same fix.", "right": "pull_request:44333"}, {"accept": false, "left": "pull_request:44010", "reason": "Both are standardized output refactors, but SqueezeBERT and MPT are different model code paths.", "right": "pull_request:44071"}, {"accept": false, "left": "pull_request:44010", "reason": "Same broad decorator migration theme, but different models and different implementation details.", "right": "pull_request:44073"}, {"accept": false, "left": "pull_request:43997", "reason": "RegNet output-tracing migration and TextNet refactor are separate model-specific changes.", "right": "pull_request:44074"}, {"accept": false, "left": "pull_request:44018", "reason": "GPT-Neo refactor and SpeechT5 refactor are unrelated code paths despite similar output-tracing goals.", "right": "pull_request:44129"}, {"accept": false, "left": "pull_request:44071", "reason": "MPT and the SqueezeBERT/T5 refactor are different models; no evidence they fix the same bug.", "right": "pull_request:44089"}, {"accept": false, "left": "pull_request:43997", "reason": "RegNet and SpeechT5 are different model implementations; similarity is only at the refactor theme level.", "right": "pull_request:44129"}, {"accept": false, "left": "pull_request:44010", "reason": "Distinct model-specific refactors; no shared concrete failure or code path.", "right": "pull_request:44018"}, {"accept": false, "left": "pull_request:44074", "reason": "TextNet and XLM/Flaubert are separate model families; not the same underlying change.", "right": "pull_request:44101"}, {"accept": false, "left": "pull_request:43996", "reason": "CVT/FNet refactors versus LongT5 standardized tracing are different implementations and not mergeable as one PR.", "right": "pull_request:44161"}, {"accept": false, "left": "pull_request:44010", "reason": "SqueezeBERT and DeBERTa V2 refactors are only superficially similar; they target different model internals.", "right": 
"pull_request:44044"}, {"accept": false, "left": "pull_request:44044", "reason": "Different models and different output-handling logic; too broad to treat as duplicate.", "right": "pull_request:44074"}, {"accept": false, "left": "pull_request:44044", "reason": "DeBERTa V2 vs CTRL: separate model code paths, same refactor family only.", "right": "pull_request:44147"}, {"accept": true, "left": "pull_request:44011", "reason": "These are the same Swin output-tracing refactor on the same files and issue target; one appears to be a revised duplicate of the other.", "right": "pull_request:44091"}], "summary": "Mostly separate model-specific output-tracing refactors grouped by theme, not true duplicates. The only clear duplicate pair is the two Swin PRs, which appear to be the same change/revision."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44074|pull_request:44161", "pull_request:44147|pull_request:44161", "pull_request:43996|pull_request:44049", "pull_request:44047|pull_request:44333", "pull_request:44010|pull_request:44071", "pull_request:44010|pull_request:44073", "pull_request:43997|pull_request:44074", "pull_request:44018|pull_request:44129", "pull_request:44071|pull_request:44089", "pull_request:43997|pull_request:44129", "pull_request:44010|pull_request:44018", "pull_request:44074|pull_request:44101", "pull_request:43996|pull_request:44161", "pull_request:44010|pull_request:44044", "pull_request:44044|pull_request:44074", "pull_request:44044|pull_request:44147", "pull_request:44011|pull_request:44091"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 20420, "estimated_input_tokens": 10082, "item_count": 17, "node_count": 17, "serialized_chars": 40326, "soft_pair_count": 20}, "cached_at": "2026-04-14T18:15:39Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": 
"1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "2aa0ed0af5c911570d8465822cb844bddaa5ca776e3badc883bd4ad4e66f8ac2", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43996", "pull_request:44007", "pull_request:44011", "pull_request:44020", "pull_request:44022", "pull_request:44023", "pull_request:44044", "pull_request:44066", "pull_request:44084", "pull_request:44085", "pull_request:44089", "pull_request:44093", "pull_request:44104", "pull_request:44129", "pull_request:44140", "pull_request:44154", "pull_request:44722"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "44066 is the strongest single representative of the cluster theme: it covers the shared GPT-J output-tracing refactor, includes the related CodeGen adaptation, and is more central than the narrower one-file variants.", "canonical_issue_reason": null, "canonical_pr_reason": "No issue item is present. 
For the PR side, 44066 is the best representative because it is the broadest concrete GPT-J/CodeGen output-tracing refactor, explicitly tied to #43979, and it sits at the center of the GPT-J duplicate subfamily.", "confidence": 0.87, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:44020", "reason": "Same DeBERTa output-tracing refactor in the same file with matching target and diff shape; this looks like the same change variant.", "right": "pull_request:44093"}, {"accept": true, "left": "pull_request:44066", "reason": "Both are GPT-J/CodeGen standardized output-tracing refactors on the same code paths; 44722 reads like a later rewrite of the same change set.", "right": "pull_request:44722"}, {"accept": true, "left": "pull_request:44066", "reason": "Both target GPT-J output tracing in the same module; 44066 is just a broader variant that also touches CodeGen, so they are mergeable as one change family.", "right": "pull_request:44084"}, {"accept": false, "left": "pull_request:44022", "reason": "Different model implementations (ConvBERT vs Nystromformer); same meta-theme, but not the same concrete bug or patch.", "right": "pull_request:44023"}, {"accept": false, "left": "pull_request:44011", "reason": "Different model code paths (Swin vs ConvBERT) and no shared concrete change beyond the generic tracing refactor theme.", "right": "pull_request:44022"}, {"accept": true, "left": "pull_request:44084", "reason": "Both are the same GPT-J standardized output-tracing refactor family on the same file, with one version just broader/later than the other.", "right": "pull_request:44722"}, {"accept": false, "left": "pull_request:44020", "reason": "DeBERTa and Nystromformer are unrelated concrete implementations; this is only a shared issue bucket, not a duplicate patch.", "right": "pull_request:44023"}, {"accept": false, "left": "pull_request:44023", "reason": "Different models and different forward paths; similar tracing cleanup, but not the same underlying 
change.", "right": "pull_request:44093"}, {"accept": false, "left": "pull_request:44020", "reason": "Different model files and different attention/output plumbing; too broad a similarity to treat as one PR.", "right": "pull_request:44022"}, {"accept": false, "left": "pull_request:44022", "reason": "ConvBERT and DeBERTa are separate implementations; they share only the umbrella output-tracing effort.", "right": "pull_request:44093"}, {"accept": true, "left": "pull_request:44084", "reason": "Near-identical GPT-J refactor in the same file and same issue context; these look like duplicate attempts at the same patch.", "right": "pull_request:44085"}, {"accept": false, "left": "pull_request:44089", "reason": "Different models (SqueezeBERT/T5 vs SpeechT5) and different code paths; not a duplicate change.", "right": "pull_request:44129"}, {"accept": false, "left": "pull_request:44020", "reason": "DeBERTa and SqueezeBERT/T5 are separate model implementations; the shared issue target is not enough to merge them.", "right": "pull_request:44089"}, {"accept": false, "left": "pull_request:43996", "reason": "CVT/FNet-style tracing changes vs RegNet output tracing are separate model-specific refactors, not one concrete bugfix.", "right": "pull_request:44007"}, {"accept": false, "left": "pull_request:43996", "reason": "These touch different model families and different outputs; only the high-level tracing initiative is shared.", "right": "pull_request:44129"}, {"accept": false, "left": "pull_request:44044", "reason": "DeBERTa-v2 and SpeechT5 are unrelated code paths; same umbrella issue, but not the same PR.", "right": "pull_request:44129"}, {"accept": false, "left": "pull_request:44044", "reason": "Different model families and different forward/output plumbing, so this is not the same underlying fix.", "right": "pull_request:44154"}, {"accept": false, "left": "pull_request:44023", "reason": "Nystromformer and GPT-J are distinct implementations; similar refactor pattern only.", 
"right": "pull_request:44085"}, {"accept": false, "left": "pull_request:44093", "reason": "DeBERTa and GPT-J are different model code paths; they belong to the same umbrella effort but not the same patch.", "right": "pull_request:44722"}], "summary": "This cluster is mostly a family of model-specific output-tracing refactors tied to issue #43979, not one single duplicate PR set. The only clear duplicate subgroups are the DeBERTa pair, the Megatron-BERT pair, and the GPT-J/CodeGen GPT-J subfamily; the rest are separate model ports of the same framework idea."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44020|pull_request:44093", "pull_request:44066|pull_request:44722", "pull_request:44066|pull_request:44084", "pull_request:44022|pull_request:44023", "pull_request:44011|pull_request:44022", "pull_request:44084|pull_request:44722", "pull_request:44020|pull_request:44023", "pull_request:44023|pull_request:44093", "pull_request:44020|pull_request:44022", "pull_request:44022|pull_request:44093", "pull_request:44084|pull_request:44085", "pull_request:44089|pull_request:44129", "pull_request:44020|pull_request:44089", "pull_request:43996|pull_request:44007", "pull_request:43996|pull_request:44129", "pull_request:44044|pull_request:44129", "pull_request:44044|pull_request:44154", "pull_request:44023|pull_request:44085", "pull_request:44093|pull_request:44722", "pull_request:44104|pull_request:44140"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 20596, "estimated_input_tokens": 10170, "item_count": 18, "node_count": 18, "serialized_chars": 40678, "soft_pair_count": 20}, "cached_at": "2026-04-14T18:17:17Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", 
"model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "677c46754458a7960d909a5ca96144984315421034d5901e6f1bf0a9a9951153", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44012", "pull_request:44024", "pull_request:44046", "pull_request:44049", "pull_request:44071", "pull_request:44073", "pull_request:44074", "pull_request:44076", "pull_request:44092", "pull_request:44094", "pull_request:44102", "pull_request:44104", "pull_request:44105", "pull_request:44106", "pull_request:44138", "pull_request:44139", "pull_request:44140", "pull_request:44141"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "44102 is the best single representative: it has a clean, model-specific output-tracing refactor scope and an exact duplicate in 44141, with the strongest reference activity in its family.", "canonical_issue_reason": null, "canonical_pr_reason": "No single canonical PR spans the whole set because it breaks into several independent duplicate families by model. 
If one anchor is needed, 44102 is the clearest exact-duplicate representative.", "confidence": 0.88, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:44102", "reason": "Same IBERT output-tracing refactor in the same file; this looks like an exact duplicate pair.", "right": "pull_request:44141"}, {"accept": true, "left": "pull_request:44105", "reason": "Same Lilt output-tracing refactor in the same file; the diffs clearly point to the same underlying change.", "right": "pull_request:44139"}, {"accept": true, "left": "pull_request:44106", "reason": "Same YOSO output-tracing refactor; 44138 also adds MRA work, but the YOSO change itself is the same underlying patch.", "right": "pull_request:44138"}, {"accept": true, "left": "pull_request:44046", "reason": "Same CodeGen output-tracing refactor in the same file with near-identical changes.", "right": "pull_request:44094"}, {"accept": true, "left": "pull_request:44012", "reason": "Same SwinV2 output-tracing refactor in the same file; this is a duplicate PR pair.", "right": "pull_request:44092"}, {"accept": false, "left": "pull_request:44024", "reason": "Different models and different code paths (FocalNet vs ImageGPT); they are only similar in refactor pattern, not the same change.", "right": "pull_request:44076"}, {"accept": false, "left": "pull_request:44139", "reason": "Lilt and Megatron-BERT are unrelated model code paths; same broad refactor theme is not enough to merge them.", "right": "pull_request:44140"}, {"accept": false, "left": "pull_request:44138", "reason": "YOSO/MRA refactor versus Megatron-BERT refactor: different models, different underlying changes.", "right": "pull_request:44140"}, {"accept": false, "left": "pull_request:44105", "reason": "Different models and implementation paths; only the output-tracing theme overlaps.", "right": "pull_request:44140"}, {"accept": false, "left": "pull_request:44104", "reason": "Megatron-BERT and Lilt are distinct models with separate refactors; not the 
same concrete fix.", "right": "pull_request:44105"}, {"accept": false, "left": "pull_request:44104", "reason": "Same general output-tracing cleanup theme, but different model/code path and unrelated patches.", "right": "pull_request:44139"}, {"accept": false, "left": "pull_request:44106", "reason": "YOSO and Megatron-BERT are different model implementations; no shared concrete bug/change.", "right": "pull_request:44140"}, {"accept": false, "left": "pull_request:44104", "reason": "Different models and different implementation details; similarity is only at the refactor pattern level.", "right": "pull_request:44106"}, {"accept": false, "left": "pull_request:44104", "reason": "Megatron-BERT is unrelated to the YOSO/MRA patch set; same infrastructure change is too broad to count as a duplicate.", "right": "pull_request:44138"}, {"accept": false, "left": "pull_request:44138", "reason": "YOSO/MRA and Lilt are different model refactors, not the same underlying change.", "right": "pull_request:44139"}, {"accept": false, "left": "pull_request:44106", "reason": "YOSO and Lilt are separate model-specific refactors with no shared code path.", "right": "pull_request:44139"}, {"accept": false, "left": "pull_request:44105", "reason": "Lilt vs YOSO: different models, so this is a generic similarity false positive.", "right": "pull_request:44106"}, {"accept": false, "left": "pull_request:44105", "reason": "Lilt does not share the same concrete change with the YOSO/MRA patch set.", "right": "pull_request:44138"}, {"accept": false, "left": "pull_request:44049", "reason": "FNet and MPT are different models; these are separate refactors that only resemble each other structurally.", "right": "pull_request:44071"}, {"accept": false, "left": "pull_request:44073", "reason": "VisualBERT and TextNet are unrelated code paths; shared output-tracing language is not enough to duplicate them.", "right": "pull_request:44074"}], "summary": "This set is mostly model-specific refactors to 
standardized output tracing. The only true duplicate pairs are within the same model/file family (IBERT, Lilt, CodeGen, Swinv2), while the cross-model similarities are mostly false positives. One YOSO PR appears to subsume the same YOSO refactor plus an extra MRA change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44102|pull_request:44141", "pull_request:44105|pull_request:44139", "pull_request:44106|pull_request:44138", "pull_request:44046|pull_request:44094", "pull_request:44012|pull_request:44092", "pull_request:44024|pull_request:44076", "pull_request:44139|pull_request:44140", "pull_request:44138|pull_request:44140", "pull_request:44105|pull_request:44140", "pull_request:44104|pull_request:44105", "pull_request:44104|pull_request:44139", "pull_request:44106|pull_request:44140", "pull_request:44104|pull_request:44106", "pull_request:44104|pull_request:44138", "pull_request:44138|pull_request:44139", "pull_request:44106|pull_request:44139", "pull_request:44105|pull_request:44106", "pull_request:44105|pull_request:44138", "pull_request:44049|pull_request:44071", "pull_request:44073|pull_request:44074"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 17334, "estimated_input_tokens": 8539, "item_count": 14, "node_count": 14, "serialized_chars": 34154, "soft_pair_count": 21}, "cached_at": "2026-04-14T18:18:15Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "0974badc631f334b6f1aa7b8e99f3147eae9c4441632eafb9e8870b222c46065", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43997", "pull_request:44010", "pull_request:44046", "pull_request:44047", 
"pull_request:44056", "pull_request:44073", "pull_request:44074", "pull_request:44094", "pull_request:44129", "pull_request:44147", "pull_request:44148", "pull_request:44149", "pull_request:44150", "pull_request:44151"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "44148 best represents the cluster as a whole because it spans the common output-tracing refactor across several files, making it the most central umbrella candidate.", "canonical_issue_reason": null, "canonical_pr_reason": "44148 is the broadest representative of the shared refactor pattern, touching multiple model implementations and overlapping with several other PRs in the cluster.", "confidence": 0.89, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44046", "reason": "Same refactor theme, but different models and files (CodeGen vs Bloom); not the same concrete change.", "right": "pull_request:44047"}, {"accept": false, "left": "pull_request:44047", "reason": "Different model targets (Bloom vs CodeGen); similar decorator migration, but not a duplicate change.", "right": "pull_request:44094"}, {"accept": false, "left": "pull_request:44056", "reason": "MPNet and VisualBert are separate model-specific refactors; shared pattern only, not the same patch.", "right": "pull_request:44073"}, {"accept": false, "left": "pull_request:44056", "reason": "MPNet vs TextNet are distinct implementations with separate edits, so this is not the same underlying change.", "right": "pull_request:44074"}, {"accept": false, "left": "pull_request:44147", "reason": "44148 is a broader multi-model sweep that includes CTRL, but it is not the same single PR/change as the CTRL-only refactor.", "right": "pull_request:44148"}, {"accept": false, "left": "pull_request:44147", "reason": "Different model scope and filenames; they share the output-tracing refactor theme but not the same concrete change.", "right": "pull_request:44149"}, {"accept": false, "left": "pull_request:44147", "reason": 
"CTRL-only vs MPT-only refactors are separate model-specific changes.", "right": "pull_request:44150"}, {"accept": false, "left": "pull_request:44147", "reason": "CTRL-only vs BioGPT+MPT is not a duplicate; overlapping theme only.", "right": "pull_request:44151"}, {"accept": false, "left": "pull_request:44148", "reason": "Both are multi-model refactor sweeps, but they touch different file sets and are not the same exact change.", "right": "pull_request:44149"}, {"accept": false, "left": "pull_request:44148", "reason": "Broader batch refactor vs MPT-only refactor; related, but not the same patch.", "right": "pull_request:44150"}, {"accept": false, "left": "pull_request:44148", "reason": "Overlapping output-tracing sweep, but different model coverage and not a single shared code-path fix.", "right": "pull_request:44151"}, {"accept": false, "left": "pull_request:44149", "reason": "One is a broader batch refactor and the other is MPT-only; not a duplicate.", "right": "pull_request:44150"}, {"accept": false, "left": "pull_request:44149", "reason": "Both involve output tracing, but they cover different model sets and are not the same change.", "right": "pull_request:44151"}, {"accept": false, "left": "pull_request:44150", "reason": "MPT-only vs BioGPT+MPT is overlapping scope, not duplicate code.", "right": "pull_request:44151"}, {"accept": false, "left": "pull_request:43997", "reason": "RegNet and SqueezeBert are unrelated model-specific refactors; same pattern only.", "right": "pull_request:44010"}, {"accept": false, "left": "pull_request:43997", "reason": "RegNet and CTRL target different models and patches.", "right": "pull_request:44147"}, {"accept": false, "left": "pull_request:43997", "reason": "RegNet is a separate model refactor from the multi-model CTRL/Bros/CPMAnt/MPT sweep.", "right": "pull_request:44148"}, {"accept": false, "left": "pull_request:43997", "reason": "RegNet does not match the BioGPT/other batch refactor scope.", "right": "pull_request:44149"}, 
{"accept": false, "left": "pull_request:43997", "reason": "RegNet and MPT are separate model implementations with distinct changes.", "right": "pull_request:44150"}, {"accept": false, "left": "pull_request:43997", "reason": "RegNet is unrelated to the BioGPT/MPT output-tracing refactor.", "right": "pull_request:44151"}], "summary": "A cluster of closely related model refactors standardizing output tracing with `capture_outputs` / `can_return_tuple` across different architectures. They share the same refactor theme, but most candidates target different models or broader/narrower sweeps rather than the same concrete change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44046|pull_request:44047", "pull_request:44047|pull_request:44094", "pull_request:44056|pull_request:44073", "pull_request:44056|pull_request:44074", "pull_request:44147|pull_request:44148", "pull_request:44147|pull_request:44149", "pull_request:44147|pull_request:44150", "pull_request:44147|pull_request:44151", "pull_request:44148|pull_request:44149", "pull_request:44148|pull_request:44150", "pull_request:44148|pull_request:44151", "pull_request:44149|pull_request:44150", "pull_request:44149|pull_request:44151", "pull_request:44150|pull_request:44151", "pull_request:43997|pull_request:44010", "pull_request:43997|pull_request:44147", "pull_request:43997|pull_request:44148", "pull_request:43997|pull_request:44149", "pull_request:44010|pull_request:44129", "pull_request:43997|pull_request:44150", "pull_request:43997|pull_request:44151"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2434, "estimated_input_tokens": 1089, "item_count": 2, "node_count": 2, "serialized_chars": 4354, "soft_pair_count": 1}, "cached_at": "2026-04-14T18:18:29Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", 
"hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "7620ab5cfacbb44763e59741f1d985163d395dbb8af45eb3f7502a64b5337f9f", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43607", "pull_request:43654"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 43654 is the better representative of the cluster only in the sense that it has a clearer self-contained bug fix with tests; however, it should not be treated as a duplicate of PR 43607.", "canonical_issue_reason": null, "canonical_pr_reason": "No single canonical PR fits this cluster because the PRs solve unrelated bugs in different components (Bark modeling vs tokenizer special-token registration).", "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43607", "reason": "Reject: these PRs address unrelated problems in different code paths. 43607 fixes Bark CPU offload/device mismatch in modeling_bark.py; 43654 fixes duplicate/overwrite behavior when adding extra special tokens in tokenizer utilities.", "right": "pull_request:43654"}], "summary": "The two PRs are not duplicates: one fixes a Bark CPU offload/device-placement mismatch in model forward pass, while the other fixes tokenizer extra-special-token handling during batch add_tokens. 
Different subsystems, different failure modes, and different code paths."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43607|pull_request:43654"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 3298, "estimated_input_tokens": 1521, "item_count": 2, "node_count": 2, "serialized_chars": 6082, "soft_pair_count": 1}, "cached_at": "2026-04-14T18:18:46Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "25d9fbb21f98790466cdf3f8248735806154dab7687f0e361859f87ef0ec11f8", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43612", "pull_request:43613"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 43613 is the better standalone PR to keep because it covers the more comprehensive segmentation pipeline implementation and associated integrations; PR 43612 is a separate feature for a different segmentation task.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 43613 is the broader and more mature pipeline addition, with more model-family integrations and test coverage. It is representative of the cluster only in the sense that both PRs are nearby feature additions, but they are not duplicates.", "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43612", "reason": "Reject: these are different feature PRs for different pipelines (concept segmentation vs visual segmentation). 
Shared docs/registration files and a common pipeline theme are not enough to treat them as the same underlying change.", "right": "pull_request:43613"}], "summary": "Two open pull requests add different pipeline features: one for Promptable Concept Segmentation and one for Promptable Visual Segmentation. They share some documentation and registration plumbing, but the underlying tasks, model integrations, and tests are distinct."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43612|pull_request:43613"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2460, "estimated_input_tokens": 1102, "item_count": 2, "node_count": 2, "serialized_chars": 4406, "soft_pair_count": 1}, "cached_at": "2026-04-14T18:19:01Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "f7b29fd8e3bf58a349a5cc569e468adb970693de9644da51ee4e8278c19e3e90", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43628", "pull_request:43629"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #43629 is the best representative because it touches the end-to-end CI/comment reporting flow rather than only one parser path.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #43629 is the broader, more central reporting-pipeline change and better represents the cluster theme; #43628 is a narrower compatibility fix in a utility script.", "confidence": 0.88, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43628", "reason": "Related topic, but not the same underlying change: #43628 adapts `check_bad_commit.py` to a new JSON format, while #43629 rewires 
workflow/report outputs for PR comment CI. They affect different code paths and would not naturally be merged as one PR.", "right": "pull_request:43629"}], "summary": "Both PRs are about new-failure reporting, but they fix different parts of the pipeline: one normalizes `new_failures.json` parsing in the commit-check utility, the other changes CI/workflow output plumbing for PR comment reporting."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43628|pull_request:43629"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2006, "estimated_input_tokens": 875, "item_count": 2, "node_count": 2, "serialized_chars": 3498, "soft_pair_count": 1}, "cached_at": "2026-04-14T18:20:44Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "4c9d356e41828a10590067257c38bf50e27cd024887452439f0922aabda9a7d7", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43642", "pull_request:45100"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "No strong best PR for the cluster; neither PR represents the other, since they edit different files for different documentation concerns.", "canonical_issue_reason": null, "canonical_pr_reason": "No canonical PR: the changes are unrelated (benchmark_config comment cleanup vs accelerator_selection docs wording change) and do not describe the same concrete fix or code path.", "confidence": 0.95, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43642", "reason": "Reject: the PRs touch different files and different concerns, with no shared underlying bug or mergeable code change; one only removes a benchmark_config 
comment line, the other rewrites accelerator selection docs.", "right": "pull_request:45100"}], "summary": "These two pull requests are both small docs/comment cleanups, but they target different files and unrelated topics, so they do not look like duplicates or the same underlying change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43642|pull_request:45100"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2404, "estimated_input_tokens": 1074, "item_count": 2, "node_count": 2, "serialized_chars": 4293, "soft_pair_count": 1}, "cached_at": "2026-04-14T18:21:03Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "d8f070f8cc0ee19d88bac7f7a7c24b18f38b80652706d5de45a1445122d2d794", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43651", "pull_request:43663"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 43651 is the more concrete bug-fix style change and a better representative of the cluster than 43663, which is a separate extensibility refactor for column filtering.", "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.98, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43651", "reason": "Same file, but different underlying code paths and goals: GA loss scaling vs. signature-column extraction. 
They are not the same fix and would not plausibly be merged as one PR.", "right": "pull_request:43663"}], "summary": "Two open PRs touch the same file but address unrelated trainer behaviors: one changes gradient-accumulation loss scaling, the other extracts signature-column filtering for PEFT/custom trainers. They are not duplicates and should remain separate."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43651|pull_request:43663"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 3252, "estimated_input_tokens": 1498, "item_count": 3, "node_count": 3, "serialized_chars": 5992, "soft_pair_count": 2}, "cached_at": "2026-04-14T18:22:32Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "c5c2fee1fe273e4b5280b42e6443d546f63efabe884a461d92374cb9b352cd34", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43656", "pull_request:43836", "pull_request:43842"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 43842 is the best representative of the cluster because it addresses the concrete runtime failure with the smallest, clearest change set and the most precise description of the bug.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 43842 is the most direct and minimal fix: it explicitly moves TypeAdapter behind TYPE_CHECKING and string-literals the runtime annotation, matching the NameError described in the title without extra refactor noise.", "confidence": 0.84, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:43656", "reason": "Both PRs target the same TypeAdapter NameError in transformers CLI/serve.py. 
43656 appears to be a broader variant of the same fix (future annotations/typing cleanup), while 43842 is the more focused implementation; they plausibly belong to one merged change.", "right": "pull_request:43842"}, {"accept": true, "left": "pull_request:43836", "reason": "These are the same underlying bug fix in the same file: avoiding runtime evaluation of TypeAdapter annotations when pydantic is unavailable. The implementation differs, but the code-path problem is the same and they could plausibly be consolidated.", "right": "pull_request:43842"}], "summary": "All three PRs are overlapping fixes in src/transformers/cli/serve.py for a TypeAdapter-related NameError in the CLI. PR 43842 is the cleanest, most narrowly targeted fix; 43656 and 43836 look like alternate/earlier variants of the same underlying annotation/import problem."}, "error_kind": null, "error_message": null, "evaluator_result": {"accept": true, "feedback": "Grounded overall. The packet supports that all three PRs touch src/transformers/cli/serve.py and address the same TypeAdapter/annotation runtime-import problem, with 43842 the most narrowly targeted fix. 
The soft-edge acceptances are conservative enough because the changes are plausibly mergeable into one PR for the same bugfix."}, "evaluator_used": true, "retried": false}, "soft_pairs": ["pull_request:43656|pull_request:43842", "pull_request:43836|pull_request:43842"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2550, "estimated_input_tokens": 1147, "item_count": 2, "node_count": 2, "serialized_chars": 4586, "soft_pair_count": 1}, "cached_at": "2026-04-14T18:22:48Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "74172adb069a2081e2e3bce3243d6ec8dc96747fe57ba68b8d30d5723e8f76d5", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43659", "pull_request:43840"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #43659 is the best representative of the cluster since it resolves the bug and is already merged.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #43659 is the better canonical PR because it is the merged implementation of the exact fix; #43840 is an unmerged duplicate with the same diff and issue target.", "confidence": 0.99, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:43659", "reason": "Same concrete bug fix, same explicit issue target, same files, and essentially identical diff; they could plausibly be merged into one PR.", "right": "pull_request:43840"}], "summary": "Both PRs make the same Qwen3-VL timestamp fix: use `temporal_patch_size` instead of `merge_size` in the processor and modular model code. 
They target the same issue and have effectively identical diffs, so they are duplicates of the same change request."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43659|pull_request:43840"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2370, "estimated_input_tokens": 1057, "item_count": 2, "node_count": 2, "serialized_chars": 4226, "soft_pair_count": 1}, "cached_at": "2026-04-14T18:22:59Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "de5a6e2affeba6a1ac3dcd6228a34960ab965b47c1b0afab229d192cfefe0aa3", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43705", "pull_request:44777"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #43705 is the best representative because it contains the actual code-path fix/feature. PR #44777 is documentation-only and depends on the implementation already existing.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #43705 is the substantive implementation of bidirectional attention by routing `is_causal=False` to a bidirectional mask path in `masking_utils.py`; it changes behavior, not just docs.", "confidence": 0.98, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43705", "reason": "Related feature area, but not mergeable as one PR: #43705 implements bidirectional attention in code, while #44777 only adds documentation for `is_causal`. 
They do not fix the same concrete code-path problem.", "right": "pull_request:44777"}], "summary": "These are related feature follow-ups, but they are not the same underlying change: one adds the bidirectional-attention implementation, the other only documents the new `is_causal` behavior. The code PR is the better canonical PR candidate."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43705|pull_request:44777"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 3778, "estimated_input_tokens": 1761, "item_count": 3, "node_count": 3, "serialized_chars": 7041, "soft_pair_count": 2}, "cached_at": "2026-04-14T18:23:26Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "ab6391ae8d53e574ed88ced38f7dc4fa1deac9bd17fe09cc70b285b1bbf80328", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43714", "pull_request:43736", "pull_request:43738"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #43736 is the clearest example of the shared refactor pattern in this cluster and has the most broadly representative scope without being too specialized to one feature path.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #43736 is the best representative of the cluster because it is a focused Trainer refactor that extracts shared helper logic into trainer_utils-style standalone functions, matching the general theme of the other PRs while remaining self-contained.", "confidence": 0.93, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43714", "reason": "Both touch Trainer refactoring, but one moves NEFTune hooks and the other moves 
checkpoint sorting/rotation; these are different code paths and not the same underlying change.", "right": "pull_request:43736"}, {"accept": false, "left": "pull_request:43736", "reason": "These are separate refactors: checkpoint management versus optimizer-class initialization. Shared Trainer files and similar titles are not enough to treat them as the same PR.", "right": "pull_request:43738"}], "summary": "Three merged PRs refactor different Trainer internals into standalone modules/functions. They are stylistically similar but implement distinct changes: NEFTune helpers, checkpoint sorting/rotation, and optimizer initialization."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43714|pull_request:43736", "pull_request:43736|pull_request:43738"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2426, "estimated_input_tokens": 1085, "item_count": 2, "node_count": 2, "serialized_chars": 4337, "soft_pair_count": 1}, "cached_at": "2026-04-14T18:23:42Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "68a516340f5281992870af4c8770a5a84b442c64b1fc3c9f9146c069e3f48712", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43718", "pull_request:43770"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 43770 is the best overall PR: it fully addresses the bug, is merged, and adds validation beyond the code change itself.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 43770 is the best canonical PR because it is merged, targets the same `Trainer.train` code path, and includes test coverage for the batch-size resume behavior.", 
"confidence": 0.97, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:43718", "reason": "Same underlying bug and same fix site in `src/transformers/trainer.py`; both change the checkpoint `train_batch_size` restore condition for resume behavior. They could plausibly be merged into one PR, with 43770 extending the earlier fix with tests.", "right": "pull_request:43770"}], "summary": "Both PRs fix the same Trainer resume logic bug: unconditional restoration of `train_batch_size` from checkpoint, which overrides the user\u2019s current batch-size configuration. PR 43770 is the stronger canonical choice because it is merged and adds tests; PR 43718 is an earlier, narrower version of the same fix."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43718|pull_request:43770"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 8178, "estimated_input_tokens": 3961, "item_count": 6, "node_count": 6, "serialized_chars": 15841, "soft_pair_count": 9}, "cached_at": "2026-04-14T18:27:14Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "b758364dc7450ed3d5f9b8a46e774ba7cb4b6e3789db475bbe359cc9b0ab6aed", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43719", "pull_request:43739", "pull_request:43779", "pull_request:43788", "pull_request:43816", "pull_request:43848"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 43719 is the best overall representative because it is merged, directly addresses the feature request, and contains the primary source change rather than a later test/follow-up variant.", "canonical_issue_reason": null, 
"canonical_pr_reason": "PR 43719 is the clearest canonical representative: it is merged, explicitly targets the tracked issue, and makes the core SwanLab integration change in `integration_utils.py` to add run ID/resume support.", "confidence": 0.84, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:43719", "reason": "Same SwanLab resume/id feature in the same file and same tracked issue; these are duplicate implementation attempts for the same code path.", "right": "pull_request:43816"}, {"accept": true, "left": "pull_request:43719", "reason": "Both PRs implement the same SwanLab integration resume/id support in `integration_utils.py` and target the same issue.", "right": "pull_request:43788"}, {"accept": true, "left": "pull_request:43739", "reason": "Same underlying SwanLab resume/id integration change; both modify the same callback/integration path for the same feature.", "right": "pull_request:43816"}, {"accept": true, "left": "pull_request:43739", "reason": "These are alternative PRs for the same SwanLab resume/id support change in the same subsystem and can plausibly be merged into one patch.", "right": "pull_request:43788"}, {"accept": true, "left": "pull_request:43739", "reason": "Both PRs work on the same SwanLab callback resume/id forwarding behavior, so they look like duplicate implementations of the same fix.", "right": "pull_request:43779"}, {"accept": true, "left": "pull_request:43788", "reason": "Same feature, same file, same issue target: both are duplicate attempts to add SwanLab run ID/resume support.", "right": "pull_request:43816"}, {"accept": false, "left": "pull_request:43739", "reason": "43848 is a later test/follow-up PR about forwarding resume init args, not the same concrete implementation patch as 43739.", "right": "pull_request:43848"}, {"accept": true, "left": "pull_request:43779", "reason": "Both PRs aim to add the same SwanLab callback resume/id support and are close enough to be merged as one change.", "right": 
"pull_request:43788"}, {"accept": false, "left": "pull_request:43719", "reason": "43719 adds environment-variable plumbing in `integration_utils.py`, while 43779 is a callback-kwargs forwarding change; related feature, but not the same concrete code-path fix.", "right": "pull_request:43779"}], "summary": "This cluster is a set of overlapping SwanLab resume/id support PRs around the same integration area, with one merged implementation and several near-duplicate follow-ups/variants. The strongest canonical PR is the merged integration update that directly adds the feature in the target code path."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43719|pull_request:43816", "pull_request:43719|pull_request:43788", "pull_request:43739|pull_request:43816", "pull_request:43739|pull_request:43788", "pull_request:43739|pull_request:43779", "pull_request:43788|pull_request:43816", "pull_request:43739|pull_request:43848", "pull_request:43779|pull_request:43788", "pull_request:43719|pull_request:43779"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2682, "estimated_input_tokens": 1213, "item_count": 2, "node_count": 2, "serialized_chars": 4851, "soft_pair_count": 1}, "cached_at": "2026-04-14T18:27:41Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "92a59ddecae30153a170a2619905dce23f1aebafb598e197d29e3d3272b66087", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43752", "pull_request:43888"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #43888 is the best representative of the cluster because it contains the actual Param2MoE 
architecture support and associated test coverage.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #43888 is the clearest canonical PR: it directly adds Param2MoE config/model integration and tests in the relevant src paths. PR #43752 does not show the same focused code-path change and appears broader/different.", "confidence": 0.71, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43752", "reason": "Despite the similar title/filenames, the evidence is not strong enough that these are the same concrete change. #43888 is a focused model-support PR, while #43752 appears broader and may be a different release/version-oriented update rather than the same code-path fix.", "right": "pull_request:43888"}], "summary": "Two PRs mention Param2MoE, but only one is a concrete model-implementation change. The other looks like a broader version/release-related PR, so I would not collapse them as duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43752|pull_request:43888"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 4146, "estimated_input_tokens": 1945, "item_count": 3, "node_count": 3, "serialized_chars": 7778, "soft_pair_count": 3}, "cached_at": "2026-04-14T18:28:02Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "37c22b81e619fcff2d083576f081acb4910c53a37c05b2f02ed958c95063628f", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43757", "pull_request:45116", "pull_request:45118"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #45118 is the best overall representative because it is the most complete 
implementation of the same GPT-OSS GGUF loading fix and explicitly supersedes the earlier workaround PR.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #45118 is the strongest canonical PR: it explicitly supersedes #43757, covers the full GGUF loading path, and includes the broader set of files/tests needed for the feature.", "confidence": 0.96, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:45116", "reason": "Both are full GPT-OSS GGUF loading support changes for the same underlying feature/problem, with overlapping implementation files and the same issue target; #45118 is just the more complete version.", "right": "pull_request:45118"}, {"accept": false, "left": "pull_request:43757", "reason": "#43757 is only a fallback to avoid a hard failure by mapping GPT-OSS GGUF to gpt-neox, while #45116 implements full GGUF loading support; these are different code-path fixes.", "right": "pull_request:45116"}, {"accept": false, "left": "pull_request:43757", "reason": "#43757 is a temporary compatibility workaround, whereas #45118 is the full GGUF loading implementation; they address related but not identical problems and would not be merged as one PR.", "right": "pull_request:45118"}], "summary": "The cluster is about GPT-OSS GGUF loading. PR #45116 and PR #45118 are effectively the same feature work, with #45118 being the more complete successor. 
PR #43757 is only a fallback workaround and not the same full change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:45116|pull_request:45118", "pull_request:43757|pull_request:45116", "pull_request:43757|pull_request:45118"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2352, "estimated_input_tokens": 1048, "item_count": 2, "node_count": 2, "serialized_chars": 4191, "soft_pair_count": 1}, "cached_at": "2026-04-14T18:28:17Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "5cf41ca857a625fd63a3320398a04e181029a48b9d3a92f48cc60ada2741347b", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43777", "pull_request:44099"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 43777 is the better representative only by being the earlier of the two version-bump changes, but it is not a duplicate of 44099.", "canonical_issue_reason": null, "canonical_pr_reason": "No single canonical PR: both are legitimate, distinct release-version bumps for different target versions.", "confidence": 0.98, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43777", "reason": "Both are version bump PRs, but they bump to different versions and represent separate release updates rather than the same code change.", "right": "pull_request:44099"}], "summary": "These are two separate dev-version bump PRs for different releases (5.2.0.dev0 and 5.3.0.dev0). 
They touch the same files but are not the same underlying change and should not be deduplicated."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43777|pull_request:44099"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2430, "estimated_input_tokens": 1087, "item_count": 2, "node_count": 2, "serialized_chars": 4347, "soft_pair_count": 1}, "cached_at": "2026-04-14T18:28:42Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "5c9d7f4c5c030cfcebba4b1cc5351b7b4d0c0a86f69179b10c44ef7945d6b105", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43790", "pull_request:45014"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "If a single PR must represent the cluster, 43790 is the better fit by scope, but there is no true duplicate relationship between the two PRs.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 43790 is the broader, more representative CI-maintenance change across multiple Dockerfiles; PR 45014 is a separate utility-behavior fix and not a duplicate.", "confidence": 0.99, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43790", "reason": "Different code paths and problems: CircleCI Torch version unpinning vs skipping `tests_hub` generation when no tests are found. 
They are not plausibly mergeable as one PR.", "right": "pull_request:45014"}], "summary": "These two pull requests are unrelated: one changes CircleCI Torch pins in Dockerfiles, the other changes test-list generation to skip `tests_hub` when no tests are found."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43790|pull_request:45014"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 17142, "estimated_input_tokens": 8443, "item_count": 14, "node_count": 14, "serialized_chars": 33772, "soft_pair_count": 14}, "cached_at": "2026-04-14T18:29:53Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "13f3500861b0b269ad6a364da973ea05f48f0b3cbf5cb321c5ce4a256c772a10", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43798", "pull_request:43820", "pull_request:43988", "pull_request:44113", "pull_request:44266", "pull_request:44581", "pull_request:44611", "pull_request:44695", "pull_request:44765", "pull_request:44899", "pull_request:45004", "pull_request:45074", "pull_request:45085", "pull_request:45199"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 45074 is the strongest standalone PR in the cluster because it has an explicit issue target, a clear concrete bug, and a complete merged fix with tests; the other items are mostly unrelated test-only or model-specific changes.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 45074 is the best representative of the only true overlap in the set: it is a merged, scoped fix for the SwitchTransformers/TimmWrapper dtype mismatch and includes tests, making it the cleanest canonical patch 
over the narrower overlapping variant 45085.", "confidence": 0.91, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43798", "reason": "Different problems: test helper usage changes across BLOOM/CLAP/CLVP versus a DAC latent-decoding STE fix. Same general area (tests/models) but not the same underlying bug.", "right": "pull_request:43820"}, {"accept": false, "left": "pull_request:43820", "reason": "DAC from_latents STE bug and Perceiver positional interpolation bug are unrelated model-specific code paths.", "right": "pull_request:44899"}, {"accept": false, "left": "pull_request:43820", "reason": "DAC latent decoding is unrelated to GraniteSpeech test device placement behavior.", "right": "pull_request:44113"}, {"accept": false, "left": "pull_request:43820", "reason": "DAC STE logic and SwitchTransformers/TimmWrapper dtype handling are different bugs in different model families.", "right": "pull_request:45074"}, {"accept": false, "left": "pull_request:43988", "reason": "Shared as test/CI fixes only; LayoutXLM tokenization/lighton_ocr test updates are unrelated to Llama4 vision rotary initialization.", "right": "pull_request:44581"}, {"accept": false, "left": "pull_request:43988", "reason": "Both touch tests, but one is a LayoutXLM/LightOnOCR CI fix and the other is a Phi-3/Parakeet/Evolla/Pi0 test adjustment; no same underlying change.", "right": "pull_request:45004"}, {"accept": false, "left": "pull_request:43988", "reason": "Unrelated CI test fixes in different model families (LayoutXLM/LightOnOCR vs PaliGemma2/PaddleOCR-VL).", "right": "pull_request:44765"}, {"accept": false, "left": "pull_request:43988", "reason": "LayoutXLM tokenization test cleanup and OmDet-Turbo timm-kwargs forwarding address different issues.", "right": "pull_request:44611"}, {"accept": false, "left": "pull_request:44266", "reason": "torch.export strict-mode check compatibility is a utility change; SwitchTransformers/TimmWrapper is a model dtype mismatch. 
Not the same bug.", "right": "pull_request:45074"}, {"accept": false, "left": "pull_request:44611", "reason": "OmDet-Turbo timm kwargs forwarding and Perceiver interpolation correction are distinct model fixes.", "right": "pull_request:44899"}, {"accept": false, "left": "pull_request:44695", "reason": "Kyutai/LongCatFlash test failures are unrelated to the Phi-3 and other test fix bundle.", "right": "pull_request:45004"}, {"accept": false, "left": "pull_request:44765", "reason": "PaddleOCR-VL/PaliGemma2 offload-test skips are not the same issue as Phi-3/Parakeet/Evolla/Pi0 generation/test adjustments.", "right": "pull_request:45004"}, {"accept": false, "left": "pull_request:45074", "reason": "SwitchTransformers/TimmWrapper dtype mismatch and Wav2Vec2Phoneme tokenizer delimiter regression are unrelated.", "right": "pull_request:45199"}, {"accept": true, "left": "pull_request:45074", "reason": "Both target the same concrete dtype-mismatch problem in SwitchTransformers/TimmWrapper with the same issue target and overlapping code paths; 45085 looks like a narrower variant of the same fix.", "right": "pull_request:45085"}], "summary": "Most pairs are only loosely similar CI/test-fix PRs in different models and should not be merged as duplicates. 
The only strong duplicate-like pair is the SwitchTransformers/TimmWrapper dtype-mismatch fixes (45074/45085), which target the same concrete code path and issue."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43798|pull_request:43820", "pull_request:43820|pull_request:44899", "pull_request:43820|pull_request:44113", "pull_request:43820|pull_request:45074", "pull_request:43988|pull_request:44581", "pull_request:43988|pull_request:45004", "pull_request:43988|pull_request:44765", "pull_request:43988|pull_request:44611", "pull_request:44266|pull_request:45074", "pull_request:44611|pull_request:44899", "pull_request:44695|pull_request:45004", "pull_request:44765|pull_request:45004", "pull_request:45074|pull_request:45199", "pull_request:45074|pull_request:45085"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2134, "estimated_input_tokens": 939, "item_count": 2, "node_count": 2, "serialized_chars": 3756, "soft_pair_count": 1}, "cached_at": "2026-04-14T18:30:30Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "b95dc6b4d21ae303713cf694816667152bbac9da38bb1ec77d754f00f49bb936", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43812", "pull_request:43822"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 43822 is the best representative of the cluster: it directly targets the named bug, is the more complete fix, and is the one that appears to subsume the broader import-order/type-hint adjustments.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 43822 is the stronger canonical PR because it states the concrete runtime 
failure (`NameError: nn is not defined`), and its patch is broader and more complete around import/annotation handling in the same module.", "confidence": 0.68, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43812", "reason": "Same file and shared issue target, but the fixes are not clearly the same code-path problem: 43812 is about importing `torch.nn` for type hints when PyTorch is unavailable, while 43822 centers on a `NameError` for `nn` under older PyTorch and includes additional import/annotation changes. Too different to merge as duplicates.", "right": "pull_request:43822"}], "summary": "Two PRs touch the same file and same tracking issue, but they appear to address different import/type-annotation failure modes in `src/transformers/integrations/accelerate.py` rather than one identical bug."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43812|pull_request:43822"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 3314, "estimated_input_tokens": 1529, "item_count": 2, "node_count": 2, "serialized_chars": 6114, "soft_pair_count": 1}, "cached_at": "2026-04-14T18:30:52Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "695899d766acc532a2113f0e4ffde3ae818c4564cd96160b52b62410bdfa2f40", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43830", "pull_request:45101"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "43830 is the better representative because it is a merged, completed model-support PR; 45101 is a separate open PR for a different model family.", "canonical_issue_reason": null, "canonical_pr_reason": null, 
"confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43830", "reason": "Reject: the overlap is only generic Transformers registry/plumbing changes. The PRs implement support for different model families and do not appear to fix the same concrete code-path problem.", "right": "pull_request:45101"}], "summary": "These PRs both add new model families and touch shared auto-registration plumbing, but they target different underlying changes: Qwen3.5 support vs Nandi support. They are not duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43830|pull_request:45101"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 3998, "estimated_input_tokens": 1871, "item_count": 3, "node_count": 3, "serialized_chars": 7481, "soft_pair_count": 3}, "cached_at": "2026-04-14T18:31:56Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "04a6e31bb9bd78619f87fc52a3541ae553853b3d25318618bce48a397145dd88", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43831", "pull_request:43832", "pull_request:43850"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "pull_request:43831 is the strongest representative of the cluster because it is the merged PR and matches the exact same file-level change as the other two.", "canonical_issue_reason": null, "canonical_pr_reason": "pull_request:43831 is the best canonical PR: it is merged, has review/inbound activity, and implements the shared docs cleanup that the other two PRs repeat.", "confidence": 0.97, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:43831", "reason": "Same 
concrete docs change in the same two files, same issue target, and the diffs are effectively the same stale `pipeline()` example removal.", "right": "pull_request:43850"}, {"accept": true, "left": "pull_request:43832", "reason": "These are duplicates of the same documentation cleanup: same files, same target issue, and the patch content is the same removal of deprecated `pipeline()` examples.", "right": "pull_request:43850"}, {"accept": true, "left": "pull_request:43831", "reason": "Near-identical PRs with the same file edits and same issue target; 43832 is just an unmerged duplicate of the merged 43831 change.", "right": "pull_request:43832"}], "summary": "All three pull requests are near-identical docs-only removals of stale `pipeline()` examples in the summarization/translation task pages, all targeting the same issue. They are duplicates of the same underlying documentation change, with 43831 the best canonical PR because it is the merged, reviewed implementation."}, "error_kind": null, "error_message": null, "evaluator_result": {"accept": true, "feedback": "The summary is grounded in the packet: all three PRs target the same issue and touch the same two docs files, and 43831 is a reasonable canonical choice because it is merged and has the most activity. 
The soft-edge accepts are conservative enough given the shared issue/files and strong diff similarity."}, "evaluator_used": true, "retried": false}, "soft_pairs": ["pull_request:43831|pull_request:43850", "pull_request:43832|pull_request:43850", "pull_request:43831|pull_request:43832"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2206, "estimated_input_tokens": 975, "item_count": 2, "node_count": 2, "serialized_chars": 3900, "soft_pair_count": 1}, "cached_at": "2026-04-14T18:32:10Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "28c7e68a2e01e9ac795747fc38d094ac70a94fd3d425b56a74c0b5fe6eb69247", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43833", "pull_request:43839"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #43839 is the best representative: it is merged, targets the exact bug, and provides the more complete fix by isolating the unsupported op from autocast.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #43839 is the better canonical PR because it is already merged and directly addresses the reported `grouped_mm` autocast mismatch in the same code path.", "confidence": 0.87, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43833", "reason": "Same underlying bug and same file/issue target, but the fixes differ materially (`input.to(weight.dtype)` vs disabling autocast around `torch._grouped_mm`), so they are not the same PR-level change and would not plausibly be merged as one code change.", "right": "pull_request:43839"}], "summary": "Both PRs target the same autocast dtype mismatch in `src/transformers/integrations/moe.py` for `torch._grouped_mm`, but they propose different 
fixes: one casts the input to `weight.dtype`, the other disables autocast around the op. They share the same issue, but I would not treat them as duplicates at the PR level because they are alternative implementations rather than the same change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43833|pull_request:43839"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2030, "estimated_input_tokens": 887, "item_count": 2, "node_count": 2, "serialized_chars": 3548, "soft_pair_count": 1}, "cached_at": "2026-04-14T18:32:23Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "f7d71f9b488c6c01aa571bf45e0ec2ebbd33d0c77dd36162f2c1eaeed231a4df", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43871", "pull_request:43966"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 43966 is the best representative because it is merged, addresses the target issue directly, and is the more polished version of the same fix.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 43966 is the merged PR and contains the same core fix as 43871, with a clearer docstring and equivalent implementation.", "confidence": 0.98, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:43871", "reason": "Same concrete bug in `dot_natural_key` causing int/string comparison failures during sorting; the diffs are effectively the same fix and could plausibly be unified into one PR.", "right": "pull_request:43966"}], "summary": "Both pull requests fix the same bug in `dot_natural_key` for mixed-type state-dict key sorting; PR 43966 is the merged, slightly 
refined version of the same change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43871|pull_request:43966"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2580, "estimated_input_tokens": 1162, "item_count": 2, "node_count": 2, "serialized_chars": 4647, "soft_pair_count": 1}, "cached_at": "2026-04-14T18:32:36Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "095b45a84593c5c2065a840bcf473c15fa69f612b8b0cfb36f7d01603a9f0973", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43876", "pull_request:43877"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #43876 is the strongest representative of the cluster\u2019s fix-style changes because it has a clear single bug, focused diff, and direct title/body alignment.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #43876 is the more self-contained change: it targets one concrete modular-conversion/config default bug with tightly scoped model and test edits.", "confidence": 0.92, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43876", "reason": "Different code paths and failures: config default cleanup for GlmMoeDsa vs missing fast image patch-counter method in GLM/Ernie image processors. Same broad Transformers area, but not the same bug or mergeable as one PR.", "right": "pull_request:43877"}], "summary": "Two unrelated PRs in the same broad model area: one fixes GlmMoeDsa config defaults, the other restores a missing fast image patch counter across GLM/Ernie VL processors. 
They do not appear to address the same underlying bug."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43876|pull_request:43877"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2358, "estimated_input_tokens": 1051, "item_count": 2, "node_count": 2, "serialized_chars": 4202, "soft_pair_count": 1}, "cached_at": "2026-04-14T18:32:58Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "148661d038a3c8b52a3047c64c5d8f17b1f18a299714ea8f7319a1162298a536", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43890", "pull_request:43891"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "43891 is the more representative coverage-focused test extension in this cluster; 43890 is a separate, narrower regression guard for BART.", "canonical_issue_reason": null, "canonical_pr_reason": "No single canonical PR: the changes target different model areas and different test goals (BART memory regression vs. RAG retriever coverage).", "confidence": 0.98, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43890", "reason": "Different subsystems and different purposes: BART memory regression test vs. RAG retriever smoke test. They do not look like the same underlying change.", "right": "pull_request:43891"}], "summary": "These are two unrelated test-only PRs: one adds a BART memory regression guard, the other extends RAG retriever smoke coverage. 
They do not appear to fix the same bug or change the same code path."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43890|pull_request:43891"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2694, "estimated_input_tokens": 1219, "item_count": 2, "node_count": 2, "serialized_chars": 4874, "soft_pair_count": 1}, "cached_at": "2026-04-14T18:33:10Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "e9ed387f3d8b68b370d568787f5b9961da76185d834564f53fa0f95da9446b6c", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43925", "pull_request:43941"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #43941 is the best representative of the cluster since its title and diff show the main concrete bug pattern being fixed (Windows UnicodeDecodeError in examples) and it covers multiple affected files. PR #43925 is related but narrower and less representative of the underlying user-visible issue.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #43941 is the better canonical PR because it addresses the broader, user-facing Windows UnicodeDecodeError across multiple example scripts and more directly matches the stated problem. PR #43925 is a narrow CI-script encoding fix and is not a substitute for the example-script bugfix.", "confidence": 0.95, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43925", "reason": "Both add UTF-8 encodings, but they fix different artifacts and code paths. One is for CircleCI generation/parsing scripts, the other for several PyTorch examples producing files on Windows. 
They are not the same concrete bug or a single mergeable PR.", "right": "pull_request:43941"}], "summary": "Both PRs address Windows text-encoding issues by adding explicit UTF-8 file opens, but they target different code paths: CircleCI helper scripts vs. multiple PyTorch example scripts. They are similar in theme, not duplicates of the same underlying change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43925|pull_request:43941"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2418, "estimated_input_tokens": 1081, "item_count": 2, "node_count": 2, "serialized_chars": 4321, "soft_pair_count": 1}, "cached_at": "2026-04-14T18:33:28Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "4466e97629d980566358fa7194193a481a64013425df86b0342c426fa5d51ebd", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43928", "pull_request:44362"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #43928 is the best overall representative because it is a clearer end-user bug fix with direct serialization impact and tighter scope than the tokenizer refactor fix.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #43928 is the stronger representative PR: it addresses a concrete, well-scoped config persistence bug, includes tests, and has an explicit issue link.", "confidence": 0.98, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43928", "reason": "Different components and failure modes: Dia config token ID serialization vs. MLuke tokenizer attribute usage. 
No shared code path or same concrete bug.", "right": "pull_request:44362"}], "summary": "These two PRs are unrelated: one fixes Dia config token IDs being preserved across save/load, while the other fixes an MLukeTokenizer AttributeError after a refactor. They do not look like the same underlying bug or change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43928|pull_request:44362"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 3392, "estimated_input_tokens": 1568, "item_count": 3, "node_count": 3, "serialized_chars": 6269, "soft_pair_count": 2}, "cached_at": "2026-04-14T18:33:44Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "11c072aacf9ee46337f4b806dae14cf575438b71e803739a6d1c6c9ca77e0a72", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43934", "pull_request:43958", "pull_request:44480"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #43934 is the best cluster representative: it introduces a concrete new quantization backend and has the broadest, most self-contained change scope.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #43934 is the strongest representative because it is the only substantive product/code feature change, with a clear implementation plus docs/tests, unlike the Dockerfile-only CI maintenance PRs.", "confidence": 0.95, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43934", "reason": "Metal quantization implementation vs. 
CI Dockerfile change for kernels; different code paths and purposes.", "right": "pull_request:43958"}, {"accept": false, "left": "pull_request:43958", "reason": "Both touch CI Dockerfiles, but one installs kernels and the other installs diffusers; unrelated dependency updates, not the same fix/change.", "right": "pull_request:44480"}], "summary": "Three unrelated pull requests: one adds Metal quantization support, one updates a quantization CI Dockerfile to install kernels, and one adds diffusers to a different CI Dockerfile. The similarity is only broad quantization/CI context, not the same change or bug."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43934|pull_request:43958", "pull_request:43958|pull_request:44480"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 3220, "estimated_input_tokens": 1482, "item_count": 2, "node_count": 2, "serialized_chars": 5926, "soft_pair_count": 1}, "cached_at": "2026-04-14T18:33:59Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "f888f0c5b548327b1767a1c199f75ea8d6401e86573a91811baf4531c0c84f29", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44006", "pull_request:44069"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #44069 is the strongest standalone PR here: it addresses a concrete float16 non-finite activation path and applies consistently across multiple affected model implementations.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #44069 is the better cluster representative because it covers a broader shared numerical-stability pattern across many models and has the 
larger, more general code change.", "confidence": 0.92, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44006", "reason": "Both are numerical-stability fixes, but they target different computations and model families: perplexity calculation with `xlogy` vs. finite-value clamping in forward passes. They would not plausibly be merged into one PR.", "right": "pull_request:44069"}], "summary": "Two merged PRs in transformer model code that both improve numerical stability, but they fix different code paths and are not duplicates: one swaps in `torch.xlogy` for perplexity in UniSpeech/Wav2Vec2-family quantizers, the other broadens float16 finiteness checks to clamp invalid activations across many seq2seq/detection models."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44006|pull_request:44069"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2442, "estimated_input_tokens": 1093, "item_count": 2, "node_count": 2, "serialized_chars": 4370, "soft_pair_count": 1}, "cached_at": "2026-04-14T18:34:51Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "ea8aab156bacf56e9ab3aa86165dbca94f4da817a1f6f4de6076006168958fd3", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44064", "pull_request:44124"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #44124 is the best representative because it cleanly introduces the new option and the end-of-training evaluation behavior without the extra trainer-loop restructuring shown in #44064.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #44124 most directly implements the 
final-eval feature by adding `eval_on_end` to `TrainingArguments` and invoking evaluation at the end of `_inner_training_loop`; it reads like the clearest self-contained change.", "confidence": 0.77, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:44064", "reason": "Both PRs are about the same underlying `eval_on_end` final-evaluation feature in `trainer.py`/`training_args.py`; the shared files and titles indicate overlapping code-path changes, so this is a likely duplicate pair.", "right": "pull_request:44124"}], "summary": "Two closed PRs appear to target the same Trainer feature: adding `eval_on_end` / final evaluation after training. PR #44124 is the cleaner, more direct implementation; #44064 looks like a broader trainer-loop refactor around the same goal."}, "error_kind": null, "error_message": null, "evaluator_result": {"accept": true, "feedback": "The summary is grounded in the packet: both PRs are closed, touch the same files, and their titles/diffs point to the same `eval_on_end` final-evaluation feature. The soft-edge duplicate call is conservative and supported by the evidence. 
PR #44124 is reasonably described as the cleaner, more direct implementation, and nothing in the packet suggests the pair is unrelated or non-mergeable."}, "evaluator_used": true, "retried": false}, "soft_pairs": ["pull_request:44064|pull_request:44124"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 13660, "estimated_input_tokens": 6702, "item_count": 10, "node_count": 10, "serialized_chars": 26805, "soft_pair_count": 24}, "cached_at": "2026-04-14T18:36:02Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "82205d6853b01ea80a3faf11f4a799bcfe7c8e67bd7a85217b5498f4441ac2c8", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44107", "pull_request:44108", "pull_request:44109", "pull_request:44110", "pull_request:44111", "pull_request:44133", "pull_request:44134", "pull_request:44135", "pull_request:44136", "pull_request:44137"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "pull_request:44136 is the best representative of the shared refactor pattern because it is the broadest and most complete PR in the set, touching two related model files.", "canonical_issue_reason": null, "canonical_pr_reason": "No single canonical PR across the whole cluster: these are separate model-specific refactors. 
If one representative is needed, pull_request:44136 is the broadest example because it includes the vitdet change plus an additional lw_detr refactor.", "confidence": 0.93, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44109", "reason": "Different model files and different refactors; only the decorator pattern is shared.", "right": "pull_request:44136"}, {"accept": false, "left": "pull_request:44108", "reason": "44136 is a broader multi-file PR; not the same single change as the vitdet-only PR.", "right": "pull_request:44136"}, {"accept": true, "left": "pull_request:44107", "reason": "Same MRA file and essentially the same output-tracing refactor.", "right": "pull_request:44137"}, {"accept": true, "left": "pull_request:44110", "reason": "Same TVP file with the same capture_outputs refactor; the diffs line up closely.", "right": "pull_request:44134"}, {"accept": true, "left": "pull_request:44109", "reason": "Same HGNetV2 model files and the same refactor pattern; appears to be the same change.", "right": "pull_request:44135"}, {"accept": true, "left": "pull_request:44111", "reason": "Same PoolFormer file and same output-tracing refactor; likely duplicate PRs.", "right": "pull_request:44133"}, {"accept": false, "left": "pull_request:44136", "reason": "Different models and different code paths; similar refactor theme only.", "right": "pull_request:44137"}, {"accept": false, "left": "pull_request:44107", "reason": "MRA refactor vs vitdet/lw_detr refactor; not the same underlying change.", "right": "pull_request:44136"}, {"accept": false, "left": "pull_request:44107", "reason": "Different model implementations; same theme but not the same bug/change.", "right": "pull_request:44108"}, {"accept": false, "left": "pull_request:44133", "reason": "PoolFormer and vitdet/lw_detr are separate model refactors, not duplicates.", "right": "pull_request:44136"}, {"accept": false, "left": "pull_request:44134", "reason": "TVP and vitdet/lw_detr are unrelated 
model-specific changes.", "right": "pull_request:44136"}, {"accept": false, "left": "pull_request:44108", "reason": "VitDet refactor vs MRA refactor; different code paths.", "right": "pull_request:44137"}, {"accept": false, "left": "pull_request:44108", "reason": "VitDet and PoolFormer are different models; only the refactor style matches.", "right": "pull_request:44111"}, {"accept": false, "left": "pull_request:44108", "reason": "VitDet and TVP are separate changes in different files/models.", "right": "pull_request:44110"}, {"accept": false, "left": "pull_request:44133", "reason": "PoolFormer and MRA are unrelated model refactors.", "right": "pull_request:44137"}, {"accept": false, "left": "pull_request:44134", "reason": "TVP and MRA touch different model code paths.", "right": "pull_request:44137"}, {"accept": false, "left": "pull_request:44133", "reason": "Different model files and different refactors; not the same change.", "right": "pull_request:44134"}, {"accept": false, "left": "pull_request:44107", "reason": "MRA and PoolFormer are distinct model implementations.", "right": "pull_request:44111"}, {"accept": false, "left": "pull_request:44107", "reason": "Different model code paths; only the output-tracing theme is shared.", "right": "pull_request:44110"}, {"accept": false, "left": "pull_request:44108", "reason": "VitDet and TVP are separate model-specific refactors.", "right": "pull_request:44134"}, {"accept": false, "left": "pull_request:44133", "reason": "PoolFormer and HGNetV2 are different models, so this is not a duplicate.", "right": "pull_request:44135"}, {"accept": false, "left": "pull_request:44110", "reason": "TVP and PoolFormer are different model code paths.", "right": "pull_request:44111"}, {"accept": false, "left": "pull_request:44110", "reason": "TVP refactor vs vitdet/lw_detr refactor; not the same underlying change.", "right": "pull_request:44136"}, {"accept": false, "left": "pull_request:44110", "reason": "Different models and different 
concrete changes.", "right": "pull_request:44133"}], "summary": "This cluster is mostly a batch of similar output-tracing refactor PRs across different model files. Only the same-file same-diff pairs look like true duplicates; the cross-model pairings are just pattern-similar refactors."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44109|pull_request:44136", "pull_request:44108|pull_request:44136", "pull_request:44107|pull_request:44137", "pull_request:44110|pull_request:44134", "pull_request:44109|pull_request:44135", "pull_request:44111|pull_request:44133", "pull_request:44136|pull_request:44137", "pull_request:44107|pull_request:44136", "pull_request:44107|pull_request:44108", "pull_request:44133|pull_request:44136", "pull_request:44134|pull_request:44136", "pull_request:44108|pull_request:44137", "pull_request:44108|pull_request:44111", "pull_request:44108|pull_request:44110", "pull_request:44133|pull_request:44137", "pull_request:44134|pull_request:44137", "pull_request:44133|pull_request:44134", "pull_request:44107|pull_request:44111", "pull_request:44107|pull_request:44110", "pull_request:44108|pull_request:44134", "pull_request:44133|pull_request:44135", "pull_request:44110|pull_request:44111", "pull_request:44110|pull_request:44136", "pull_request:44110|pull_request:44133"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 8192, "estimated_input_tokens": 3968, "item_count": 7, "node_count": 7, "serialized_chars": 15870, "soft_pair_count": 8}, "cached_at": "2026-04-14T18:36:28Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": 
"5985f7f1516201ded83a14bb71fffedff1d41572c19dae3c1b4f4ff69094c823", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44108", "pull_request:44109", "pull_request:44111", "pull_request:44133", "pull_request:44134", "pull_request:44135", "pull_request:44136"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "44111 is the most representative cluster member because it matches the recurring refactor pattern without spanning multiple unrelated models, making it a better anchor than broader or later variants.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 44111 is the best canonical representative: it is a clean, single-model output-tracing refactor with the clearest standalone scope and more discussion activity than the nearly identical later PoolFormer PR 44133.", "confidence": 0.93, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44135", "reason": "Different targets and code paths: HGNetV2 vs a PR spanning LW-DETR/VitDet. 
Same refactor theme, but not the same underlying change.", "right": "pull_request:44136"}, {"accept": false, "left": "pull_request:44111", "reason": "Both are output-tracing refactors, but for different model implementations (PoolFormer vs HGNetV2) with separate code paths and patches.", "right": "pull_request:44135"}, {"accept": false, "left": "pull_request:44109", "reason": "Same general decorator/capture_outputs refactor, but applied to different architectures and files; not one concrete bug fix.", "right": "pull_request:44111"}, {"accept": false, "left": "pull_request:44134", "reason": "TVP and HGNetV2 are unrelated model backends; these are parallel refactors, not duplicate fixes.", "right": "pull_request:44135"}, {"accept": false, "left": "pull_request:44111", "reason": "PoolFormer vs TVP, different model code paths and output handling details; not plausibly one merged PR for the same bug.", "right": "pull_request:44134"}, {"accept": false, "left": "pull_request:44111", "reason": "VitDet/LW-DETR and PoolFormer are distinct implementations; shared refactor pattern is too broad to treat as the same change.", "right": "pull_request:44136"}, {"accept": false, "left": "pull_request:44109", "reason": "HGNetV2 vs PoolFormer are separate model-specific refactors. Similarity is thematic, not a duplicate underlying fix.", "right": "pull_request:44133"}, {"accept": false, "left": "pull_request:44108", "reason": "VitDet and HGNetV2 patches touch different model files and different forward/output plumbing; same modernization theme only.", "right": "pull_request:44109"}], "summary": "A cluster of model-specific refactor PRs to adopt output tracing/capture_outputs across different vision backbones. They share a common refactor theme, but the concrete code changes are model-local and not mergeable as duplicates; no issue artifacts are present. 
One near-duplicate PoolFormer PR exists in the set, but the listed soft edges do not include that exact pair."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44135|pull_request:44136", "pull_request:44111|pull_request:44135", "pull_request:44109|pull_request:44111", "pull_request:44134|pull_request:44135", "pull_request:44111|pull_request:44134", "pull_request:44111|pull_request:44136", "pull_request:44109|pull_request:44133", "pull_request:44108|pull_request:44109"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 3610, "estimated_input_tokens": 1677, "item_count": 2, "node_count": 2, "serialized_chars": 6706, "soft_pair_count": 1}, "cached_at": "2026-04-14T18:36:42Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "161db805b84ef626e25886fb4e2e7606b0cabf4f3ec5049151e435b8123c20b3", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44130", "pull_request:44181"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #44181 is the best representative because it covers the core code-path and has the wider blast radius; #44130 is a narrower generation-side follow-up.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #44181 is the broader, more central change around removing cache positions from core cache/masking APIs and model integrations.", "confidence": 0.92, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44130", "reason": "Related cache_position work, but #44130 changes generation candidate/input prep while #44181 removes cache positions from core cache APIs; not the same underlying fix and not mergeable as one 
PR.", "right": "pull_request:44181"}], "summary": "Both PRs target the broader cache_position cleanup effort, but they fix different layers: #44130 is a generation/input-preparation refactor, while #44181 is the core cache/masking API removal. Similar theme, not the same concrete change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44130|pull_request:44181"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 5554, "estimated_input_tokens": 2649, "item_count": 4, "node_count": 4, "serialized_chars": 10593, "soft_pair_count": 6}, "cached_at": "2026-04-14T18:40:52Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "b51afe08d45164de47bbdfeb18545b78a471538347ef801acb3e2b0096436d62", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44182", "pull_request:44211", "pull_request:44216", "pull_request:44249"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "44249 is the best single PR to anchor the cluster because it directly implements the compatibility behavior in the affected code path and is more explicit/robust than the other variants.", "canonical_issue_reason": null, "canonical_pr_reason": "44249 has the cleanest, most complete implementation of the same compatibility fix: it adds `grouped_entities` handling, enforces mutual exclusion with `aggregation_strategy`, and maps to the intended enum values.", "confidence": 0.87, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:44211", "reason": "Both PRs implement backward compatibility for deprecated `grouped_entities` in `token_classification.py` and target the same underlying 
code-path change; they are plausible merge alternatives for one fix.", "right": "pull_request:44249"}, {"accept": false, "left": "pull_request:44182", "reason": "44182 is primarily a docs/reference removal update and does not implement the same concrete compatibility behavior as 44249.", "right": "pull_request:44249"}, {"accept": false, "left": "pull_request:44211", "reason": "Despite sharing the same file and issue target, the available evidence is too weak and the PR intent/title is inconsistent; do not treat as the same concrete change.", "right": "pull_request:44216"}, {"accept": false, "left": "pull_request:44216", "reason": "The diff snippets resemble the same area, but the PR intent is unclear and the implementations are not clearly the same exact fix; too risky to merge as duplicates.", "right": "pull_request:44249"}, {"accept": false, "left": "pull_request:44182", "reason": "44182 removes mentions of `grouped_entities`, while 44211 adds backward compatibility for it; these are different change types and not the same bug fix.", "right": "pull_request:44211"}, {"accept": false, "left": "pull_request:44182", "reason": "44182 is documentation/reference cleanup, whereas 44216 appears to touch runtime parameter handling; they are not clearly the same underlying change.", "right": "pull_request:44216"}], "summary": "The cluster centers on backward compatibility for the deprecated `grouped_entities` parameter in `token_classification.py`. 
PR 44249 is the strongest canonical choice; 44211 is a very close alternate implementation, while 44182 is mostly docs/reference cleanup and 44216 is too ambiguous from the title/diff preview to treat as the same change with confidence."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44211|pull_request:44249", "pull_request:44182|pull_request:44249", "pull_request:44211|pull_request:44216", "pull_request:44216|pull_request:44249", "pull_request:44182|pull_request:44211", "pull_request:44182|pull_request:44216"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 3660, "estimated_input_tokens": 1702, "item_count": 3, "node_count": 3, "serialized_chars": 6805, "soft_pair_count": 3}, "cached_at": "2026-04-14T18:41:37Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "972291d8e3f8811e084f2df3207f1d56292dc3f94e7b806d9aca78e52174fe5a", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44187", "pull_request:44204", "pull_request:44209"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #44187 is the strongest representative because it is merged and includes the most complete concrete fix for the tokenizer/NER regressions, not just the smaller attribute-only subset.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #44187 is the best canonical PR: it is merged, and it covers the attribute bug plus the max_length/truncation-related tokenizer fix in one complete change set.", "confidence": 0.91, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44204", "reason": "They share the same file and issue target, but fix 
different bugs: #44204 only stores the tokenizer attribute, while #44209 fixes batched NER crash logic by calling sequence_ids()/word_ids().", "right": "pull_request:44209"}, {"accept": false, "left": "pull_request:44187", "reason": "Different concrete changes and code paths: #44187 adds tokenizer state/max_length handling, while #44209 fixes the batched word-label extraction bug using sequence_ids()/word_ids().", "right": "pull_request:44209"}, {"accept": true, "left": "pull_request:44187", "reason": "#44204 is a strict subset of #44187's tokenizer-state fix: both address the same missing only_label_first_subword attribute bug, and #44187 simply adds an additional related tokenizer change.", "right": "pull_request:44204"}], "summary": "All three PRs touch the same LayoutLMv2 tokenizer, but only one pair is effectively the same fix: the standalone attribute-storage patch is a subset of the merged broader tokenizer fix. The batched NER/word_ids crash fix is a different code path and should stay separate."}, "error_kind": null, "error_message": null, "evaluator_result": {"accept": true, "feedback": "The summary is grounded in the packet: all three PRs target the same LayoutLMv2 tokenizer area and issue, 44187 is merged, 44204 is a clear subset of 44187, and 44209 is a distinct batched NER/word_ids fix. 
The soft-edge verdicts are conservative and do not overstate equivalence."}, "evaluator_used": true, "retried": false}, "soft_pairs": ["pull_request:44204|pull_request:44209", "pull_request:44187|pull_request:44209", "pull_request:44187|pull_request:44204"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 1848, "estimated_input_tokens": 796, "item_count": 2, "node_count": 2, "serialized_chars": 3184, "soft_pair_count": 1}, "cached_at": "2026-04-14T18:42:48Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "2397260d18ac49c501d24b82da1f64ffa88df3419055a41ebb7a3ce0e769baec", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44218", "pull_request:44219"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "44218 is a slightly cleaner representative only because it is a single, concrete command typo fix; however, it is not a duplicate of 44219.", "canonical_issue_reason": null, "canonical_pr_reason": "No canonical PR for a duplicate set: 44218 fixes a sentencepiece install-command typo in one doc, while 44219 fixes a different wording typo in another doc.", "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44218", "reason": "Different typo fixes in different documentation files; they touch separate text and do not appear to fix the same underlying bug or change.", "right": "pull_request:44219"}], "summary": "These are two separate Italian documentation typo fixes in different files, not the same underlying change. 
The soft similarity is only at the broad docs/typo level, so they should not be merged as duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44218|pull_request:44219"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2560, "estimated_input_tokens": 1152, "item_count": 2, "node_count": 2, "serialized_chars": 4607, "soft_pair_count": 1}, "cached_at": "2026-04-14T18:43:23Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "f1e73ed4ee22b6fff1382a65274cb93a00737fab0a05e88568d77976ac511776", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44234", "pull_request:45143"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #45143 is the best representative of the cluster because it contains the main functional change; PR #44234 is a narrow test/type-hint PR for a specific model.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #45143 is the more substantial and central change: it introduces parse_response support and updates pipeline postprocessing behavior across multiple files.", "confidence": 0.94, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44234", "reason": "Different underlying changes: #44234 only adds Phi-4 multimodal processing tests and a typing fix, while #45143 adds generic response parsing behavior in processor/pipeline code. 
They are not the same bug or mergeable as one PR.", "right": "pull_request:45143"}], "summary": "Two unrelated PRs in the processing/pipeline area: one is a small Phi-4 multimodal typing/test cleanup, the other adds response parsing support to processors and updates pipeline postprocessing. They do not fix the same concrete bug or change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44234|pull_request:45143"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6114, "estimated_input_tokens": 2929, "item_count": 4, "node_count": 4, "serialized_chars": 11713, "soft_pair_count": 6}, "cached_at": "2026-04-14T18:43:58Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "b2600004b3464bf3cd207a2ff3d2fe659a90e7352740e73976ebe43f91bd2976", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44243", "pull_request:44274", "pull_request:44284", "pull_request:44586"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #44284 is the best overall representative for this cluster because it most directly and cleanly addresses the reported Mixtral aux-loss bug while remaining focused on the concrete fix.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #44284 is the cleanest representative of the core bug fix: it directly targets the Mixtral auxiliary-loss path, is issue-linked, and includes test coverage without the extra breadth of the multi-model PR.", "confidence": 0.77, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44243", "reason": "Both touch Mixtral aux-loss logic, but #44586 is a much broader multi-model MoE change 
set; it is not the same concrete PR-level change as #44243.", "right": "pull_request:44586"}, {"accept": true, "left": "pull_request:44274", "reason": "These are near-duplicates for the same issue, same Mixtral modular code path, and same fix intent, with only minor packaging/test differences.", "right": "pull_request:44284"}, {"accept": false, "left": "pull_request:44284", "reason": "#44586 extends the pattern across many MoE models and files, so it is broader than the focused Mixtral fix in #44284 rather than a duplicate of it.", "right": "pull_request:44586"}, {"accept": false, "left": "pull_request:44274", "reason": "The overlap is only the shared issue pattern; #44586 is a broader multi-file, multi-model PR, not the same concrete change as #44274.", "right": "pull_request:44586"}, {"accept": true, "left": "pull_request:44243", "reason": "Both PRs address the same underlying Mixtral load-balancing/aux-loss bug when `output_router_logits=False`, and their fixes are compatible parts of one consolidated fix.", "right": "pull_request:44274"}, {"accept": true, "left": "pull_request:44243", "reason": "Same underlying Mixtral bug and same issue target; the two PRs differ mainly in implementation/file variant, so they look like duplicate attempts at the same fix.", "right": "pull_request:44284"}], "summary": "This cluster is centered on the same Mixtral aux-loss bug around `output_router_logits=False`, with two near-duplicate Mixtral-specific fix PRs and one broader multi-model MoE PR. 
The broader PR is related but not a duplicate of the narrower Mixtral-only changes."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44243|pull_request:44586", "pull_request:44274|pull_request:44284", "pull_request:44284|pull_request:44586", "pull_request:44274|pull_request:44586", "pull_request:44243|pull_request:44274", "pull_request:44243|pull_request:44284"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2372, "estimated_input_tokens": 1058, "item_count": 2, "node_count": 2, "serialized_chars": 4230, "soft_pair_count": 1}, "cached_at": "2026-04-14T18:44:04Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "eaaadbcd04cfbb110aaca590aa41ba62da6c6ec4c25c0e57a21916e2057d394f", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44245", "pull_request:44447"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 44245 is the more self-contained backward-compatibility fix and has a clearer, concrete behavior change, but it is not a duplicate of the other PR.", "canonical_issue_reason": null, "canonical_pr_reason": "No single canonical PR: these are separate fixes for different backward-compatibility mechanisms in different subsystems.", "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44245", "reason": "Different concrete problems and code paths: `image_processing_base.from_dict` applies extra kwargs to image processor instances, while `processing_utils` changes how hardcoded subprocessor class names are resolved. 
They share a broad backward-compatibility theme but are not the same fix and would not plausibly be merged into one PR.", "right": "pull_request:44447"}], "summary": "Two merged PRs both touch backward-compatibility behavior, but they fix different code paths in different files: image processor `from_dict` kwarg handling vs processor sub-processor class resolution. They are not duplicates of the same underlying bug/change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44245|pull_request:44447"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2292, "estimated_input_tokens": 1018, "item_count": 2, "node_count": 2, "serialized_chars": 4070, "soft_pair_count": 1}, "cached_at": "2026-04-14T18:44:10Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "40531188c0479b993e7284d19e6ae629bd24894f8260ba03b8048557e805f4fc", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44254", "pull_request:44294"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #44294 is the strongest standalone PR here: it is merged, narrowly scoped, and directly addresses the OLMo3 tokenizer backend selection bug.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #44294 is the better representative because it is merged and makes a focused, model-specific fix for OLMo3 by selecting `TokenizersBackend` to preserve custom `pre_tokenizer` behavior.", "confidence": 0.88, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44254", "reason": "Both concern custom pre-tokenizer preservation, but they change different layers and models: GPT-2 tokenizer 
construction vs. OLMo3 auto-tokenizer backend mapping. Different concrete fixes, so not a duplicate.", "right": "pull_request:44294"}], "summary": "These PRs are related thematically (preserving custom fast-tokenizer pre-tokenizers), but they fix different code paths: one updates GPT-2 tokenizer initialization to stop overwriting `tokenizer.json` settings, while the other changes OLMo3\u2019s auto-tokenizer backend mapping. They are not the same underlying change and would not naturally merge into one PR."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44254|pull_request:44294"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 9098, "estimated_input_tokens": 4421, "item_count": 7, "node_count": 7, "serialized_chars": 17684, "soft_pair_count": 7}, "cached_at": "2026-04-14T18:44:35Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "64ab054ae3073fe32c899d93236d2eb7733d57e66a2d9e19e5e52f15dfaa89f6", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44267", "pull_request:44323", "pull_request:44324", "pull_request:44326", "pull_request:44787", "pull_request:44788", "pull_request:44807"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 44323 is the best global PR to represent the duplicate pair: it is narrowly scoped, clearly tied to the tracking issue, and matches 44324 exactly. 
The remaining PRs are related docs cleanups but not the same underlying change.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 44323 is the best canonical representative because it is the clearer, earlier copy of the duplicate tutorial update and includes the explicit issue target 18926. PR 44324 is the same change with the same files and should be treated as the duplicate.", "confidence": 0.95, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44267", "reason": "Both are docs changes about document/question answering, but one updates the pipeline docstring and the other adds a tutorial example; they are different edits and not the same concrete change.", "right": "pull_request:44323"}, {"accept": true, "left": "pull_request:44323", "reason": "Same title, same explicit issue target, same files, and the diff is effectively identical; these are duplicate PRs for the same tutorial documentation update.", "right": "pull_request:44324"}, {"accept": false, "left": "pull_request:44323", "reason": "44323 adds a document-question-answering example, while 44326 adds a zero-shot classification example. They are unrelated tutorial additions.", "right": "pull_request:44326"}, {"accept": false, "left": "pull_request:44323", "reason": "One PR adds a new document QA example, while the other removes an outdated question-answering example from a different location. 
Same broad topic, but not the same change.", "right": "pull_request:44788"}, {"accept": false, "left": "pull_request:44324", "reason": "Document QA tutorial content and zero-shot classification tutorial content are different changes to different examples, not duplicates.", "right": "pull_request:44326"}, {"accept": false, "left": "pull_request:44787", "reason": "Both remove question-answering references from docs, but they affect different documentation pages/locales and are separate cleanup edits rather than one mergeable change.", "right": "pull_request:44807"}, {"accept": false, "left": "pull_request:44788", "reason": "These are separate documentation removals in different files: one updates the pipeline API docs, the other updates quicktour tables. Related, but not the same underlying PR.", "right": "pull_request:44807"}], "summary": "This cluster is mostly documentation PRs around the question-answering pipeline, but only PR 44323 and 44324 are true duplicates of the same tutorial change. 
The other PRs touch different docs pages or make different content changes and should stay separate."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44267|pull_request:44323", "pull_request:44323|pull_request:44324", "pull_request:44323|pull_request:44326", "pull_request:44323|pull_request:44788", "pull_request:44324|pull_request:44326", "pull_request:44787|pull_request:44807", "pull_request:44788|pull_request:44807"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2880, "estimated_input_tokens": 1312, "item_count": 3, "node_count": 3, "serialized_chars": 5246, "soft_pair_count": 2}, "cached_at": "2026-04-14T18:44:57Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "0bc58ae4ade0cd4e3c6cc78bb3d47141259cc6818e63e739a3c584e38a8655c6", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44272", "pull_request:44288", "pull_request:45069"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 44272 is the best overall representative because it addresses the most specific shared bug in the cluster and is the closest true duplicate counterpart to 44288; 45069 is related but fixes a different code path in rope validation.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 44272 is the clearest representative of the duplicate pair: it fixes the concrete TypeError in convert_rope_params_to_dict for list-valued ignore_keys, and 44288 is just an alternate implementation of the same fix.", "confidence": 0.84, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44272", "reason": "Both touch modeling_rope_utils.py and mention 
ignore_keys-as-list TypeErrors, but they fix different code paths: convert_rope_params_to_dict vs _check_received_keys. Related theme, not the same underlying change.", "right": "pull_request:45069"}, {"accept": false, "left": "pull_request:44288", "reason": "Same file and similar symptom, but 44288 patches convert_rope_params_to_dict while 45069 patches rope validation received_keys filtering. These are separate bugs, not one mergeable duplicate.", "right": "pull_request:45069"}, {"accept": true, "left": "pull_request:44272", "reason": "Both target the same function and the same underlying bug: handling ignore_keys_at_rope_validation when it may be a list. The implementation differs slightly, but the code-path problem is the same and could plausibly be merged into one PR.", "right": "pull_request:44288"}], "summary": "Two PRs are near-duplicates for the same rope-utils list/set TypeError in convert_rope_params_to_dict, while the third is a related but separate validation-path fix."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44272|pull_request:45069", "pull_request:44288|pull_request:45069"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 5854, "estimated_input_tokens": 2799, "item_count": 4, "node_count": 4, "serialized_chars": 11195, "soft_pair_count": 6}, "cached_at": "2026-04-14T18:45:38Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "1296b07b748b6dd0c8dff138fe6dfef5b8e7c832a7b2cb7e5cbb27c75dac4762", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44289", "pull_request:44312", "pull_request:44725", "pull_request:44809"], "result": 
{"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #44289 is the best overall representative for the cluster\u2019s clear duplicate subset: it matches #44312 exactly in title, files, and patch, while the other PRs are not safe duplicates.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #44289 is the best canonical representative because it is one side of the only clear duplicate pair and its scope is cleanly described by the title and diff; #44312 is effectively identical.", "confidence": 0.92, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:44289", "reason": "Same title, same files, and the diff preview is identical; this is a true duplicate.", "right": "pull_request:44312"}, {"accept": false, "left": "pull_request:44289", "reason": "They target different code paths: FlexOlmo/Olmo2/Olmo3 cleanup vs a broad PR centered on Fuyu/flex_attention-related changes. Same broad pattern, but not the same change.", "right": "pull_request:44725"}, {"accept": false, "left": "pull_request:44289", "reason": "Different scopes and files: one is FlexOlmo/Olmo2/Olmo3, the other is Fuyu image processing plus XLNet. Not the same underlying bug/change.", "right": "pull_request:44809"}, {"accept": false, "left": "pull_request:44312", "reason": "Although both are cleanup-style PRs, the concrete code paths and file sets do not match; this is not a safe duplicate.", "right": "pull_request:44725"}, {"accept": false, "left": "pull_request:44312", "reason": "Different concrete patches and subsystems; same general exception-handling theme is too broad to treat as duplicate.", "right": "pull_request:44809"}, {"accept": false, "left": "pull_request:44725", "reason": "Both touch Fuyu, but the diffs suggest different larger sweeps and additional unrelated changes. 
They are not clearly the same PR-worthy bug fix.", "right": "pull_request:44809"}], "summary": "The cluster splits into two unrelated cleanup themes: one exact duplicate pair about replacing bare except clauses in FlexOlmo/Olmo2/Olmo3, and two broader PRs that both touch Fuyu but appear to cover different, larger exception-handling sweeps. Only the identical pair should be merged."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44289|pull_request:44312", "pull_request:44289|pull_request:44725", "pull_request:44289|pull_request:44809", "pull_request:44312|pull_request:44725", "pull_request:44312|pull_request:44809", "pull_request:44725|pull_request:44809"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2324, "estimated_input_tokens": 1034, "item_count": 2, "node_count": 2, "serialized_chars": 4135, "soft_pair_count": 1}, "cached_at": "2026-04-14T18:45:46Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "408d8eb842f69de620c924297af82f6b1bfbe85eb826569697e1239f0f426c4e", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44316", "pull_request:44345"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #44345 is the best representative of the cluster because it is the more complete fix for the same underlying verbosity bug and is explicitly framed as fixing #44303.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #44345 is the better canonical PR: it directly targets #44303, keeps the same code path, adds the TTY-specific guard that explains the CI/log-file issue, and includes tests.", "confidence": 0.96, 
"soft_edge_verdicts": [{"accept": true, "left": "pull_request:44316", "reason": "Same underlying bug in the same `convert_and_load_state_dict_in_model` tqdm logging path; the later PR is a refinement of the earlier fix and they could plausibly be merged into one change.", "right": "pull_request:44345"}], "summary": "Both PRs address the same weight-loading tqdm verbosity problem in `core_model_loading.py` for issue #44303. PR #44345 is the more complete follow-up, adding the TTY guard and tests, while #44316 is a simpler earlier variant."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44316|pull_request:44345"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 4156, "estimated_input_tokens": 1950, "item_count": 3, "node_count": 3, "serialized_chars": 7797, "soft_pair_count": 3}, "cached_at": "2026-04-14T18:45:58Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "42925ab64e791cda23cb4759f06a3a7213393b261fb73d3cd8c7aec68a447a04", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44317", "pull_request:44346", "pull_request:44585"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 44317 is the strongest representative of the cluster because it directly targets the shared bug with the broadest relevant scope and is essentially the template that PR 44346 duplicates.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 44317 is the best canonical PR: it is the earliest and most complete of the near-identical fixes, covering the same Deepseek MLA layernorm eps bug across both V2 and V3 with a clean, focused diff.", "confidence": 
0.92, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:44317", "reason": "Same fix, same affected DeepseekV2/V3 layernorm constructors, same issue target, and the diffs are effectively identical.", "right": "pull_request:44346"}, {"accept": false, "left": "pull_request:44317", "reason": "Related bug pattern, but not the same PR change: 44585 is DeepseekV3-only and also touches additional model files, so it is broader and not a clean duplicate of 44317.", "right": "pull_request:44585"}, {"accept": false, "left": "pull_request:44346", "reason": "They overlap on the general eps omission theme, but 44346 is the V2/V3 fix while 44585 is a different V3-centric patch with broader file coverage.", "right": "pull_request:44585"}], "summary": "Two PRs are near-identical fixes for DeepseekV2/V3 MLA layernorm eps handling; the third is a related but broader DeepseekV3-only change and not a duplicate of the earlier pair."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44317|pull_request:44346", "pull_request:44317|pull_request:44585", "pull_request:44346|pull_request:44585"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2372, "estimated_input_tokens": 1058, "item_count": 2, "node_count": 2, "serialized_chars": 4230, "soft_pair_count": 1}, "cached_at": "2026-04-14T18:46:10Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "5e71e899f337caeac31640a931365d9a7a5d751a056d835953a77fad2a70cbf5", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44332", "pull_request:45269"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #44332 is the 
best representative for the cluster since it is merged, self-contained, and the broader completed change among the two typo-only PRs.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #44332 is the better canonical PR because it is already merged and represents a completed typo-fix change, whereas #45269 is still open and touches a separate file/function.", "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44332", "reason": "Both are typo-fix PRs, but they edit different files and different text/code symbols with no shared concrete bug or patchable code path; they should remain separate.", "right": "pull_request:45269"}], "summary": "Two typo-fix PRs in unrelated code paths: one merged PR for typos in PEFT/Dia comments, and one open PR fixing a misspelled output-capturing helper name and docstring in a different utility module. They are not the same change and should not be merged as duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44332|pull_request:45269"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2470, "estimated_input_tokens": 1107, "item_count": 2, "node_count": 2, "serialized_chars": 4427, "soft_pair_count": 1}, "cached_at": "2026-04-14T18:46:36Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "6e24559fc6b0cba1777ca7e6eda459d35015eaf4d6e7650e40d3ea4d071c2059", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44344", "pull_request:44427"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 44427 is the strongest standalone fix for the underlying tokenizer_class 
serialization bug; it is broader and more directly addresses save_pretrained behavior than the Qwen3.5-specific mapping tweak in PR 44344.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 44427 is the better representative because it fixes the root load/save behavior generically and includes tests, rather than only patching Qwen3.5 class mapping.", "confidence": 0.78, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44344", "reason": "Related to the same issue target, but the diffs implement different fixes in different code paths. 44344 changes Qwen3.5 auto-class mapping; 44427 preserves the original tokenizer_class through the tokenizer load/save flow. They are similar, but not the same concrete change and are not clearly mergeable as one duplicate PR.", "right": "pull_request:44427"}], "summary": "Two PRs address the same reported tokenizer_class mismatch around Qwen3.5 save_pretrained, but they do so with different fixes: one is a narrow auto-mapping adjustment and the other is a broader preservation of the original tokenizer_class in the load/save path."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44344|pull_request:44427"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 3706, "estimated_input_tokens": 1725, "item_count": 3, "node_count": 3, "serialized_chars": 6900, "soft_pair_count": 3}, "cached_at": "2026-04-14T18:46:49Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "ba21b2b7e958a0d29f8007bad82cff1b9655433d92c0eee821bcb936303133bc", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44354", "pull_request:44363", 
"pull_request:44584"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "44584 is the strongest representative of the cluster: same concrete bug fix, merged, and narrowly scoped to the exact off-by-one boundary condition. It is the most suitable canonical PR for deduplication.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 44584 is the best canonical PR because it is the merged change that directly fixes the boundary check in the production code path. The other two PRs are earlier duplicate attempts for the same issue.", "confidence": 0.98, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:44354", "reason": "Same underlying off-by-one bug in `decode_spans`, same file, same fix (`<` to `<=`), and both are straightforward to merge into one PR.", "right": "pull_request:44584"}, {"accept": true, "left": "pull_request:44354", "reason": "Both PRs fix the same concrete `decode_spans` boundary error and add regression tests for the same issue; they are clearly duplicate change attempts.", "right": "pull_request:44363"}, {"accept": true, "left": "pull_request:44363", "reason": "Identical code-path bug and identical boundary fix in `document_question_answering.py`; the PRs differ only in test organization, not in the underlying change.", "right": "pull_request:44584"}], "summary": "Three PRs all target the same off-by-one bug in `decode_spans` (`len(scores_flat) <= topk` instead of `< topk`) for issue #44327. 
PR 44584 is the merged, canonical implementation; 44354 and 44363 are duplicate unmerged attempts with the same fix and minor test placement differences."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44354|pull_request:44584", "pull_request:44354|pull_request:44363", "pull_request:44363|pull_request:44584"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 1924, "estimated_input_tokens": 834, "item_count": 2, "node_count": 2, "serialized_chars": 3333, "soft_pair_count": 1}, "cached_at": "2026-04-14T18:46:54Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "70fbc7f06cd59c72f48a50b61c7fdb8410ce5b4e48ecec3b21b80c731e817bd3", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44372", "pull_request:44511"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #44372 is the strongest overall representative because it is the more explicit statement of the same concrete fix and issue linkage, while the diff is effectively the same as #44511.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #44372 is the better canonical representative: it is earlier, clearly targets the bug in `get_docstring_indentation_level`, and explicitly calls out Cython-compiled functions and built-ins as the failure case.", "confidence": 0.98, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:44372", "reason": "Same function, same file, same fallback behavior, and same issue target; the diffs differ only trivially in wording/order, so they are the same underlying change.", "right": "pull_request:44511"}], "summary": "Both items are near-duplicate 
pull requests that patch the same function in the same file with the same fallback for `inspect.getsource` failures on compiled/Cython functions."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44372|pull_request:44511"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 3706, "estimated_input_tokens": 1725, "item_count": 3, "node_count": 3, "serialized_chars": 6899, "soft_pair_count": 3}, "cached_at": "2026-04-14T18:47:11Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "9b95fa74e40925bc6f7cb979628cd668f7c6ae6f670c9bc9c4e0e08a7e3d03c5", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44374", "pull_request:44547", "pull_request:44590"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 44547 is the best representative because it most cleanly captures the underlying fix in the target file, with the same issue linkage and the most activity among the duplicate pair.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 44547 is the best canonical PR: it targets the same file and same docstring correction as 44374, but appears more mature/reviewed and is the later, more representative instance of that change.", "confidence": 0.94, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:44374", "reason": "Same file, same issue target, and essentially the same docstring correction for position_ids; these are the same underlying change.", "right": "pull_request:44547"}, {"accept": false, "left": "pull_request:44374", "reason": "They only share a tracking issue and a broad docstring theme; this PR edits a different file with 
different wording/context, so it is not the same concrete change.", "right": "pull_request:44590"}, {"accept": false, "left": "pull_request:44547", "reason": "Same tracking issue is not enough here: the PRs touch different files and are distinct documentation edits rather than one mergeable code change.", "right": "pull_request:44590"}], "summary": "PRs 44374 and 44547 are near-identical docstring fixes in the same file and look like duplicates of the same change. PR 44590 is about a similar position_ids docstring, but it touches a different file and is broader/reworded rather than the same concrete edit."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44374|pull_request:44547", "pull_request:44374|pull_request:44590", "pull_request:44547|pull_request:44590"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 3350, "estimated_input_tokens": 1547, "item_count": 3, "node_count": 3, "serialized_chars": 6185, "soft_pair_count": 2}, "cached_at": "2026-04-14T18:47:24Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "73e0b1e5b86e891d35cd9201436e494a0bcba6fd260a1efccfc2ba30355ee81a", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44377", "pull_request:44498", "pull_request:44613"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #44377 is the best representative PR because it targets the concrete code path change, whereas #44498 is a different TP backend integration and #44613 is documentation only.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #44377 is the most substantive and feature-complete code change in the cluster, 
directly implementing tensor-parallel support for compressed tensors; the others are a backend init tweak and documentation updates.", "confidence": 0.93, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44377", "reason": "Not the same change: #44377 modifies compressed-tensors TP logic in code, while #44613 only adds TP training docs/navigation.", "right": "pull_request:44613"}, {"accept": false, "left": "pull_request:44498", "reason": "Not the same underlying bug or fix: #44498 changes Neuron backend initialization for tensor parallelism, while #44613 is docs-only.", "right": "pull_request:44613"}], "summary": "The three PRs only share a broad tensor-parallelism theme. They do not describe the same underlying change: one implements TP support for compressed tensors, one adds Neuron backend initialization, and one is docs-only for TP training. No duplicate/merge candidate here."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44377|pull_request:44613", "pull_request:44498|pull_request:44613"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 3174, "estimated_input_tokens": 1459, "item_count": 2, "node_count": 2, "serialized_chars": 5834, "soft_pair_count": 1}, "cached_at": "2026-04-14T18:47:33Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "bd4cfe89ce312d0386515ce4df812524c2e3b795398bb1744755eaecbf7b64af", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44391", "pull_request:44392"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #44392 is the best global PR candidate because it is the concrete, merged code fix. 
PR #44391 is much broader, spanning docs, auto mappings, new model files, and feature integration rather than the same specific bug.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #44392 is the best canonical PR: it is merged and narrowly targets the ASR padding-cache implementation in the core model path.", "confidence": 0.87, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44391", "reason": "They overlap on VibeVoice ASR files, but #44391 is a broad feature/integration PR while #44392 is a specific padding-cache bugfix. Different underlying changes, so not a duplicate.", "right": "pull_request:44392"}], "summary": "These are related VibeVoice ASR pull requests, but they are not the same change: one is a broad model/docs addition and the other is a narrow ASR padding-cache fix. Treat them as separate PRs; the merged, focused fix is the better canonical PR."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44391|pull_request:44392"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 4796, "estimated_input_tokens": 2270, "item_count": 4, "node_count": 4, "serialized_chars": 9079, "soft_pair_count": 3}, "cached_at": "2026-04-14T18:47:58Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "8066e1027b12378ea5afeda4529aeaa03f0f57d09284b24bdcd0f85694f0a07e", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44399", "pull_request:45251", "pull_request:45368", "pull_request:45369"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 45251 is the strongest global representative because it directly addresses the shared 
beam-search sampling failure in generation/utils and is the clearest tracking-fix submission in the cluster.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 45251 is the best representative of the beam-search multinomial-limit bug cluster: it is the earlier PR, explicitly targets issue #45245, and fixes the exact generation/utils code path.", "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44399", "reason": "Different bugs in different areas: a Qwen3_5 position_ids typo vs streaming-serving tokenizer resolution.", "right": "pull_request:45368"}, {"accept": false, "left": "pull_request:45368", "reason": "Unrelated fixes: serving streamer tokenizer lookup vs beam-search multinomial CUDA-limit handling in generation.", "right": "pull_request:45369"}, {"accept": true, "left": "pull_request:45251", "reason": "Same underlying bug in generation/utils: beam-search sampling hits torch.multinomial's CUDA dimension limit; both target issue #45245 and patch the same code path.", "right": "pull_request:45369"}], "summary": "One real duplicate pair is the two generation PRs for the beam-search multinomial CUDA limit; the Qwen forward-pass fix and the serving tokenizer fix are unrelated to that bug and to each other."}, "error_kind": null, "error_message": null, "evaluator_result": {"accept": true, "feedback": "The verdicts are grounded in the packet and stay conservative. The Qwen3_5 position_ids fix and the serving tokenizer-resolution fix are clearly unrelated. 
The two generation PRs both target issue #45245, touch the same file and code path, and address the same multinomial CUDA-limit bug, so treating them as a soft duplicate is supported."}, "evaluator_used": true, "retried": false}, "soft_pairs": ["pull_request:44399|pull_request:45368", "pull_request:45368|pull_request:45369", "pull_request:45251|pull_request:45369"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2218, "estimated_input_tokens": 981, "item_count": 2, "node_count": 2, "serialized_chars": 3921, "soft_pair_count": 1}, "cached_at": "2026-04-14T18:48:11Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "d40ed61e95fdc1f65e0169caa6f72510eed53cae3c27cf2080e3d3eb49aa6cb5", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44424", "pull_request:44436"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #44436 is the best overall fit because it is merged and provides the broader operational fix for non-text-only models, while #44424 is a narrower compatibility patch for a specific processor return type.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #44436 is the stronger canonical representative: it is merged, directly addresses the multimodal continuous-batching limitation, and cleanly guards the unsupported path by falling back to regular generation.", "confidence": 0.89, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44424", "reason": "Same broad feature area and same issue target, but different underlying fixes: #44424 patches string-vs-tensor handling inside continuous batching, while #44436 disables continuous batching for non-text-only models and falls back to regular generate. 
They are related but not the same concrete change.", "right": "pull_request:44436"}], "summary": "Both PRs are about continuous batching with multimodal/non-text models in `src/transformers/cli/serve.py`, but they fix different problems: one adds a fallback to regular generation for non-text-only models, the other handles `apply_chat_template` returning a string for multimodal processors. They are related, not duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44424|pull_request:44436"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2982, "estimated_input_tokens": 1363, "item_count": 2, "node_count": 2, "serialized_chars": 5451, "soft_pair_count": 1}, "cached_at": "2026-04-14T18:48:52Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "c33cf752c613331bbc9822e60b5b956a4ff77007af9fe31216098bbd913a302b", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44467", "pull_request:44468"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 44468 is the best representative of the cluster: it is already merged, narrowly scoped, and clearly addresses the placeholder-token replacement behavior without the broader tokenizer-conversion changes in 44467.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 44468 is the better canonical PR because it is merged, focused on one concrete bug path, and directly replaces placeholder tokens via `added_tokens_decoder` with a small code change.", "confidence": 0.89, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44467", "reason": "Related theme, but different fixes: 44467 
adds broader SentencePiece/native conversion support across many tokenizer files, while 44468 specifically replaces placeholder tokens from `added_tokens_decoder` in `tokenization_utils_tokenizers.py`. They are not the same concrete bug/change and would not plausibly merge into one PR.", "right": "pull_request:44468"}], "summary": "Both PRs concern placeholder-token handling, but they target different code paths and scopes. PR 44468 is a narrow, merged fix in native tokenizer conversion; PR 44467 is a broader open PR modifying slow-tokenizer conversion and many model tokenizers. They are related, but not the same underlying change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44467|pull_request:44468"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2392, "estimated_input_tokens": 1068, "item_count": 2, "node_count": 2, "serialized_chars": 4271, "soft_pair_count": 1}, "cached_at": "2026-04-14T18:49:29Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "a4cca482b3cde6a495ba63521e3093bd308988e3715f4c4d0b1956953d7dde82", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44504", "pull_request:44505"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 44505 best represents the underlying change since it subsumes PR 44504's behavior and adds extra input-type coverage without changing the target code path.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 44505 is the stronger canonical choice because it contains the broader, more robust validation logic (`None`, `Path`, string coercion, and missing-file handling) for the same 
`load_vocab` path-validation change.", "confidence": 0.98, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:44504", "reason": "Same function, same file, same user-visible change area (invalid vocab path error handling). PR 44505 is a superset of PR 44504, so they are effectively duplicate fixes for the same underlying bug and could plausibly be merged into one PR.", "right": "pull_request:44505"}], "summary": "Two near-identical PRs fix `load_vocab` in BERT tokenization to give clearer errors for invalid vocabulary paths; PR 44505 is the more complete version because it also supports `Path` and `None` checks."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44504|pull_request:44505"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6492, "estimated_input_tokens": 3118, "item_count": 3, "node_count": 3, "serialized_chars": 12469, "soft_pair_count": 2}, "cached_at": "2026-04-14T18:50:06Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "b9c30e80b2d815b01a08b754f66a5985b32dc6b4dedef1e0219eb9ceb1ce1b0c", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44510", "pull_request:44513", "pull_request:44533"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #44513 is the strongest representative because it is merged and covers the concrete CLI-reference removal across the model docs; #44533 is the near-duplicate follow-up, while #44510 is a different docs issue.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #44513 is the best canonical PR: it targets the removed `transformers run` CLI command cleanup, has the same 
file set as the later duplicate, and is already merged.", "confidence": 0.93, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44510", "reason": "Both are documentation cleanups, but they address different underlying removals: pipeline-task references vs. the removed `transformers run` CLI command. They are not the same change and should not be merged as duplicates.", "right": "pull_request:44513"}, {"accept": true, "left": "pull_request:44513", "reason": "Same underlying docs change: both remove `transformers run` CLI references from the same model docs, with highly overlapping filenames and the same target issue #44512.", "right": "pull_request:44533"}], "summary": "Two docs-cleanup PRs are about the removed `transformers run` CLI command and are duplicates of the same change; the third PR is a separate docs cleanup for different pipeline-task references."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44510|pull_request:44513", "pull_request:44513|pull_request:44533"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 1948, "estimated_input_tokens": 846, "item_count": 2, "node_count": 2, "serialized_chars": 3384, "soft_pair_count": 1}, "cached_at": "2026-04-14T18:51:11Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "f13715bd355037a9cc40e9bf7bd640b3374361c58328b273a01cd2317d31f9d6", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44524", "pull_request:44525"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "Neither PR is a suitable canonical representative for the cluster because they address different code paths 
and should not be merged as duplicates.", "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.98, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44524", "reason": "Same file, but different underlying defects and code paths: AssistantTracker.is_active() state handling vs _parse_type_hint() Union[Any] parsing. They are not the same change and would not plausibly be one PR.", "right": "pull_request:44525"}], "summary": "The cluster contains two unrelated merged PRs that happen to modify the same file, but they fix different chat-template bugs: one corrects tracker activity state when lists are empty, the other prevents a KeyError in type-hint parsing for Union[Any]."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44524|pull_request:44525"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2640, "estimated_input_tokens": 1192, "item_count": 3, "node_count": 3, "serialized_chars": 4765, "soft_pair_count": 3}, "cached_at": "2026-04-14T18:51:16Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "d318b7a3009efc28648193c578144bf5b8657566749afd0f19475fcf0d45ccbd", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44528", "pull_request:44529", "pull_request:44552"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "#44529 is the best representative because it is merged and contains the same concrete code change as the duplicate #44528; #44552 is only a revert.", "canonical_issue_reason": null, "canonical_pr_reason": "#44529 is the merged PR with the exact same diff and file as #44528, so it is the best 
canonical PR for the actual change.", "confidence": 0.98, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:44528", "reason": "Same title, same file, and identical diff; #44529 is just the merged instance of the same change.", "right": "pull_request:44529"}, {"accept": false, "left": "pull_request:44528", "reason": "#44552 is a revert of the added line, so it is the inverse change rather than the same underlying fix.", "right": "pull_request:44552"}, {"accept": false, "left": "pull_request:44529", "reason": "This is a revert-vs-original pair, not two PRs fixing the same concrete problem in a mergeable way.", "right": "pull_request:44552"}], "summary": "Two PRs are effectively the same tiny change to wav2vec2 tokenization, with #44529 being the merged copy of #44528. #44552 is a revert of that change, not a duplicate fix/change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44528|pull_request:44529", "pull_request:44528|pull_request:44552", "pull_request:44529|pull_request:44552"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2188, "estimated_input_tokens": 966, "item_count": 2, "node_count": 2, "serialized_chars": 3864, "soft_pair_count": 1}, "cached_at": "2026-04-14T18:52:00Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "a2e3b2d524179d9e1b52522df90293a6d93dfc172270fc7bfee52c4f30486dd2", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44596", "pull_request:44616"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 44596 is the best representative because it is more explicit, more robust, and better validated 
than the exception-catching variant in PR 44616.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 44596 is the cleaner fix: it handles float8 types up front, includes tests, and reads as a direct workaround for `torch.set_default_dtype` not accepting float8 dtypes.", "confidence": 0.96, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:44596", "reason": "Both PRs fix the same concrete bug in `local_torch_dtype`: loading float8 models causes `torch.set_default_dtype` to fail, and both fall back to `bfloat16`. They are plausible alternative implementations of the same fix and could be merged into one PR.", "right": "pull_request:44616"}], "summary": "Both pull requests address the same float8 failure in `local_torch_dtype` and target the same upstream issue. They are alternative fixes for the same code path, so they should be clustered together, with PR 44596 the stronger canonical choice."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44596|pull_request:44616"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 4506, "estimated_input_tokens": 2125, "item_count": 4, "node_count": 4, "serialized_chars": 8499, "soft_pair_count": 6}, "cached_at": "2026-04-14T18:52:14Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "1aa3abe7fecddf854d1acc637d679adf999cbac626f6d5a28c669dc0632d8eae", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44599", "pull_request:44813", "pull_request:45021", "pull_request:45054"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 45054 is the strongest global choice: it is the only merged PR 
and its change is narrowly scoped and concrete. The others affect different automation paths and are not the same change.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 45054 is the best representative because it is merged and makes a small, self-contained workflow fix; the other PRs are separate unmerged workflow edits with different purposes.", "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44599", "reason": "Different workflow files and different fixes: one changes self-comment CI argument passing, the other changes repo-consistency bot comment parsing. Same broad area, but not the same underlying change.", "right": "pull_request:44813"}, {"accept": false, "left": "pull_request:44599", "reason": "One updates self-comment CI invocation details; the other changes the anti-slop workflow trigger from pull_request_target to pull_request. These are unrelated code paths.", "right": "pull_request:45021"}, {"accept": false, "left": "pull_request:44599", "reason": "Different workflow jobs and different intent: self-comment CI message handling vs metadata update token handling. Not mergeable as one fix.", "right": "pull_request:45054"}, {"accept": false, "left": "pull_request:44813", "reason": "Repo-consistency bot logic for comment commands is unrelated to anti-slop's event trigger/security change.", "right": "pull_request:45021"}, {"accept": false, "left": "pull_request:44813", "reason": "These touch different workflow automations and different implementation details; no shared concrete bug or change.", "right": "pull_request:45054"}, {"accept": false, "left": "pull_request:45021", "reason": "Anti-slop trigger/security semantics differ completely from the metadata workflow token-passing update; unrelated PRs.", "right": "pull_request:45054"}], "summary": "These are four unrelated workflow maintenance PRs touching different files and changing different behaviors, so they should not be deduplicated. 
The only strong candidate for a representative is the merged metadata workflow PR."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44599|pull_request:45054", "pull_request:44599|pull_request:45021", "pull_request:44599|pull_request:44813", "pull_request:44813|pull_request:45054", "pull_request:44813|pull_request:45021", "pull_request:45021|pull_request:45054"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 14022, "estimated_input_tokens": 6883, "item_count": 9, "node_count": 9, "serialized_chars": 27532, "soft_pair_count": 24}, "cached_at": "2026-04-14T18:52:48Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "131c31b9787f0a0b3c1589dd1b6eead60df38622cd3d6f95787cead2af7297a9", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44630", "pull_request:44648", "pull_request:44651", "pull_request:44664", "pull_request:44688", "pull_request:44691", "pull_request:44693", "pull_request:44714", "pull_request:44920"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "44920 is the best single PR to represent the cluster\u2019s core duplicate set because it targets the Qwen3.5 label-mapping propagation bug directly and in a more robust form than the earlier variants.", "canonical_issue_reason": null, "canonical_pr_reason": "44920 is the cleanest and most complete representative of the Qwen3.5 label-propagation fix: it directly propagates num_labels/id2label/label2id into text_config for the affected config path, matching the reported issue while staying focused on the same bug.", "confidence": 0.86, "soft_edge_verdicts": [{"accept": true, "left": 
"pull_request:44648", "reason": "Same Qwen3.5 label-propagation bug; both PRs update text_config so sequence-classification heads see the correct num_labels/label mappings.", "right": "pull_request:44920"}, {"accept": true, "left": "pull_request:44630", "reason": "Same underlying Qwen3.5 num_labels propagation fix, just implemented with different config synchronization details.", "right": "pull_request:44648"}, {"accept": true, "left": "pull_request:44630", "reason": "Same bug and same code path in Qwen3.5 config; 44920 is essentially a later refinement of the earlier propagation fix.", "right": "pull_request:44920"}, {"accept": true, "left": "pull_request:44648", "reason": "Both address the same Qwen label-propagation problem in Qwen3.5, even though 44693 bundles additional model updates.", "right": "pull_request:44693"}, {"accept": true, "left": "pull_request:44691", "reason": "Same Qwen3.5 text_config label propagation issue; 44693 is a broader variant of the same change.", "right": "pull_request:44693"}, {"accept": true, "left": "pull_request:44693", "reason": "Both fix the same Qwen3.5 label propagation bug; 44920 is a narrower, cleaner version of the same remedy.", "right": "pull_request:44920"}, {"accept": true, "left": "pull_request:44714", "reason": "Same underlying Qwen label propagation fix, with 44714 extending the patch to more Qwen-family configs.", "right": "pull_request:44920"}, {"accept": true, "left": "pull_request:44651", "reason": "Same Qwen3.5 label propagation change; both modify the Qwen3.5 config path to keep text_config in sync.", "right": "pull_request:44693"}, {"accept": false, "left": "pull_request:44688", "reason": "Related theme, but not the same concrete change set: 44688 is a broader multi-model/Qwen patch while 44693 bundles unrelated flex-attention and other model edits.", "right": "pull_request:44693"}, {"accept": false, "left": "pull_request:44648", "reason": "Different fix path: 44664 is a generic multimodal sequence-classifier 
change using get_text_config(), not the specific Qwen3.5 num_labels propagation bug.", "right": "pull_request:44664"}, {"accept": true, "left": "pull_request:44651", "reason": "Same underlying Qwen3.5/Qwen label propagation bug; 44688 is a broader multi-model version of the same fix.", "right": "pull_request:44688"}, {"accept": true, "left": "pull_request:44691", "reason": "Same Qwen3.5 label propagation issue; 44714 generalizes the same fix to more Qwen model variants.", "right": "pull_request:44714"}, {"accept": true, "left": "pull_request:44691", "reason": "Same bug and same code path; 44920 is a refined version that also propagates label mappings.", "right": "pull_request:44920"}, {"accept": true, "left": "pull_request:44630", "reason": "Same Qwen3.5 config propagation problem, with both PRs aiming to keep text_config aligned with top-level labels.", "right": "pull_request:44691"}, {"accept": true, "left": "pull_request:44630", "reason": "Same underlying fix for Qwen3.5 num_labels propagation, just with different implementation style.", "right": "pull_request:44651"}, {"accept": true, "left": "pull_request:44651", "reason": "Same Qwen3.5 issue; 44920 is the more complete label-propagation variant.", "right": "pull_request:44920"}, {"accept": false, "left": "pull_request:44688", "reason": "Although both are Qwen-family PRs, they are broader mixed patches and do not look like the same single concrete change that could be safely merged as one PR.", "right": "pull_request:44714"}], "summary": "The cluster splits into one duplicate set of Qwen/Qwen3.5 label-propagation PRs and two broader, related-but-distinct PRs: a generic multimodal sequence-classifier fix and a flex-attention compatibility change. 
The Qwen3.5 label-propagation PRs are duplicates of the same underlying bug; the broader classifier/flex-attention PRs are not."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44648|pull_request:44920", "pull_request:44630|pull_request:44648", "pull_request:44630|pull_request:44920", "pull_request:44648|pull_request:44693", "pull_request:44691|pull_request:44693", "pull_request:44693|pull_request:44920", "pull_request:44714|pull_request:44920", "pull_request:44651|pull_request:44693", "pull_request:44688|pull_request:44693", "pull_request:44648|pull_request:44664", "pull_request:44651|pull_request:44688", "pull_request:44691|pull_request:44714", "pull_request:44688|pull_request:44691", "pull_request:44688|pull_request:44920", "pull_request:44651|pull_request:44714", "pull_request:44651|pull_request:44664", "pull_request:44648|pull_request:44691", "pull_request:44648|pull_request:44651", "pull_request:44691|pull_request:44920", "pull_request:44651|pull_request:44691", "pull_request:44630|pull_request:44691", "pull_request:44630|pull_request:44651", "pull_request:44651|pull_request:44920", "pull_request:44688|pull_request:44714"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2946, "estimated_input_tokens": 1345, "item_count": 2, "node_count": 2, "serialized_chars": 5377, "soft_pair_count": 1}, "cached_at": "2026-04-14T18:53:09Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "485b63aed7cb8fb2a98eec44b7f8c959b6847f09875d33d27b46a403963bebb8", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44693", "pull_request:44714"], "result": {"analyst_result": 
{"best_issue_reason": null, "best_pr_reason": "PR 44714 is the better representative because it covers the same fix pattern across more Qwen variants and looks like the more complete patch.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 44714 is the broader, more general version of the same `num_labels` propagation fix and appears to subsume the narrower Qwen3.5-only PR.", "confidence": 0.83, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:44693", "reason": "Same underlying `num_labels` -> `text_config` propagation fix for Qwen models; 44714 appears to generalize the narrower Qwen3.5 change in 44693, and their file overlap suggests they could plausibly be merged into one PR.", "right": "pull_request:44714"}], "summary": "Two closed PRs appear to be near-duplicates around propagating Qwen `num_labels` into `text_config`; one is a narrower Qwen3.5-scoped version and the other broadens the same fix across Qwen models, with highly overlapping files and likely the same code-path change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44693|pull_request:44714"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2428, "estimated_input_tokens": 1086, "item_count": 2, "node_count": 2, "serialized_chars": 4343, "soft_pair_count": 1}, "cached_at": "2026-04-14T18:53:26Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "110d7ccd7d3b04b1a107f3516345dc353dae2f77965ebb08d4dc5f6269cd3bd1", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44649", "pull_request:44652"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 44652 
is the best representative of the cluster because it is the most targeted and least invasive implementation of the shared fix.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 44652 is the more focused fix: it directly adds saving primary non-tokenizer sub-processors\u2019 standalone config files while preserving the existing flow, matching the bug description closely.", "confidence": 0.91, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:44649", "reason": "Both PRs address the same underlying processor.save_pretrained defect: non-tokenizer sub-processors were not being saved to standalone config files. They touch the same file, target the same issue, and the code changes are close enough that they could plausibly be merged into one PR.", "right": "pull_request:44652"}], "summary": "Two pull requests appear to fix the same processor.save_pretrained bug: missing standalone config files for non-tokenizer sub-processors. They target the same issue and same file with very similar behavior changes, so they should be clustered together."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44649|pull_request:44652"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2738, "estimated_input_tokens": 1241, "item_count": 2, "node_count": 2, "serialized_chars": 4961, "soft_pair_count": 1}, "cached_at": "2026-04-14T18:53:38Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "64d8f6c98964b19a1a5b57d89885631fa56386404aaa9b92c1060c15c6d4e328", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44685", "pull_request:45228"], "result": {"analyst_result": 
{"best_issue_reason": null, "best_pr_reason": "#45228 is the strongest representative PR because it spans the main functional change (tiny model creation/registry support) rather than a localized test harness adjustment.", "canonical_issue_reason": null, "canonical_pr_reason": "#45228 is the better canonical PR for the cluster because it is the broader, more central change set; #44685 is a narrower test-only helper fix.", "confidence": 0.9, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44685", "reason": "Not the same underlying fix: #44685 updates model tester setup to pass `parent`, while #45228 adds tiny-model creation support and image processor registry entries across several models. Overlap is only general test/model infrastructure.", "right": "pull_request:45228"}], "summary": "The two PRs are both about model/test infra cleanup, but they address different problems: #44685 fixes missing `parent` plumbing in specific model tester classes, while #45228 is a broader tiny-model-creation and image-processor registration update across multiple models. 
They should not be merged as duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44685|pull_request:45228"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 3674, "estimated_input_tokens": 1709, "item_count": 3, "node_count": 3, "serialized_chars": 6835, "soft_pair_count": 3}, "cached_at": "2026-04-14T18:53:52Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "4062d91a1bfdf7772226b264c3a792bffc005ee0cf27da25edd7a81cc5893013", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44705", "pull_request:44706", "pull_request:44707"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 44707 is the cleanest representative of the cluster because it is a straightforward forward-signature threading change, but it is still only representative of one model-specific implementation, not a duplicate of the others.", "canonical_issue_reason": null, "canonical_pr_reason": "No single canonical PR stands out: each PR is model-specific (`RoFormer`, `Bloom`, `MPT`) and implements `position_ids` support in a different forward path, so they are not the same change.", "confidence": 0.94, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44705", "reason": "Both add `position_ids`, but RoFormer changes positional embedding handling while Bloom only threads the new argument through its causal LM forward path. 
Different code paths and not plausibly one merged PR.", "right": "pull_request:44706"}, {"accept": false, "left": "pull_request:44705", "reason": "RoFormer\u2019s patch alters sinusoidal position embedding logic; MPT\u2019s patch is a separate signature/plumbing update. Same theme, but not the same underlying bug or change.", "right": "pull_request:44707"}, {"accept": false, "left": "pull_request:44706", "reason": "Bloom and MPT each add `position_ids` support in different model implementations. They are similar refactors, but not a single concrete change that should be deduplicated.", "right": "pull_request:44707"}], "summary": "Three closed PRs all add `position_ids` support to different model forward paths, but each touches a distinct architecture-specific implementation. They are thematically similar, not duplicates of the same concrete change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44705|pull_request:44706", "pull_request:44705|pull_request:44707", "pull_request:44706|pull_request:44707"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 1866, "estimated_input_tokens": 805, "item_count": 2, "node_count": 2, "serialized_chars": 3218, "soft_pair_count": 1}, "cached_at": "2026-04-14T18:54:09Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "8d4d8572013842151e56b2131e23f36171047200f308572b483ab8070d4cf2a4", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44712", "pull_request:45243"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #45243 is the most up-to-date concrete change in the cluster: it advances the Dockerfile 
to torch 2.11/TorchCodec 0.11.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #45243 is the later, more current NVIDIA CI image bump and is the best representative of this small upgrade series.", "confidence": 0.95, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44712", "reason": "Same file and subsystem, but different concrete upgrades: one bumps PyTorch/TorchCodec to 2.10/0.10, the other to 2.11/0.11. They are sequential changes, not the same underlying fix/change.", "right": "pull_request:45243"}], "summary": "Two merged PRs that both update the same NVIDIA CI Dockerfile, but they are sequential version bumps to different PyTorch/TorchCodec releases (2.10/0.10 vs 2.11/0.11), so they are not duplicates of the same change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44712|pull_request:45243"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7886, "estimated_input_tokens": 3815, "item_count": 6, "node_count": 6, "serialized_chars": 15258, "soft_pair_count": 7}, "cached_at": "2026-04-14T18:54:28Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "45f1017f631fb5bd80e57b72ccd07bac50cdf119c5dabf61321743f273ba8a3b", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44718", "pull_request:44723", "pull_request:44856", "pull_request:44916", "pull_request:44986", "pull_request:45039"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "Pull request 44986 is the best single PR exemplar: merged, narrowly scoped to the actual bug, and the clearest finished fix in the set.", "canonical_issue_reason": null, 
"canonical_pr_reason": "Pull request 44986 is the strongest canonical PR overall: it is merged, directly fixes the Deberta-v2 Python 3.13 compatibility issue, and represents the final concrete resolution rather than an earlier variant.", "confidence": 0.97, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:44718", "reason": "Same Pixio bug and same fix: propagate interpolate_pos_encoding through embeddings/model, with the same issue target and overlapping files.", "right": "pull_request:44723"}, {"accept": false, "left": "pull_request:44718", "reason": "Different subsystems and bugs: Pixio positional-encoding propagation vs Deberta-v2 Python 3.13 decorator/comment ordering.", "right": "pull_request:44986"}, {"accept": true, "left": "pull_request:44856", "reason": "Same Deberta-v2 Python 3.13 compatibility fix; both move the copied-from comment relative to @torch.jit.script in the same file.", "right": "pull_request:44916"}, {"accept": true, "left": "pull_request:44856", "reason": "Same underlying Deberta-v2 compatibility problem and same code pattern in modeling_deberta_v2.py; 44986 is a later variant of the same fix.", "right": "pull_request:44986"}, {"accept": true, "left": "pull_request:44916", "reason": "Same Deberta-v2 bug/fix family in the same file; 44986 is the merged cleanup of the same decorator/comment issue.", "right": "pull_request:44986"}, {"accept": true, "left": "pull_request:44723", "reason": "Same Pixio interpolate_pos_encoding propagation bug; 45039 is a broader but still equivalent fix in the same code path.", "right": "pull_request:45039"}, {"accept": true, "left": "pull_request:44718", "reason": "Same Pixio positional-encoding propagation change; both target the same underlying bug and could plausibly be merged into one PR.", "right": "pull_request:45039"}], "summary": "Two separate duplicate clusters are present: Pixio interpolate_pos_encoding propagation, and Deberta-v2 Python 3.13 @torch.jit.script/comment ordering. 
I treat within-cluster PRs as duplicates when they fix the same concrete code path, and reject cross-topic pairs."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44718|pull_request:44723", "pull_request:44718|pull_request:44986", "pull_request:44856|pull_request:44916", "pull_request:44856|pull_request:44986", "pull_request:44916|pull_request:44986", "pull_request:44723|pull_request:45039", "pull_request:44718|pull_request:45039"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 9278, "estimated_input_tokens": 4511, "item_count": 6, "node_count": 6, "serialized_chars": 18043, "soft_pair_count": 12}, "cached_at": "2026-04-14T18:55:56Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "862922cca4baf457b3d5e4ce9cedb682bfcc55a834647fbdf66101a697ec56de", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44745", "pull_request:44753", "pull_request:44762", "pull_request:44782", "pull_request:44822", "pull_request:44824"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR `44782` is the best representative because it is merged, directly targets the underlying XLNet bug, and captures the fix in the most maintainable form.", "canonical_issue_reason": null, "canonical_pr_reason": "PR `44782` is the only merged fix here and the most complete/clean implementation of the same XLNet device-placement bug, with the explicit issue target `44737`.", "confidence": 0.89, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44745", "reason": "`44745` is a broader multi-file sweep, not the same concrete XLNet-only change; they overlap in subsystem 
but not in exact fix scope.", "right": "pull_request:44824"}, {"accept": false, "left": "pull_request:44745", "reason": "`44745` is a broad unrelated sweep across many files, while `44762` is a focused XLNet positional-encoding patch; not the same change set.", "right": "pull_request:44762"}, {"accept": true, "left": "pull_request:44753", "reason": "Both patch the same XLNet `relative_positional_encoding` CPU/device bug by adding device-aware `torch.arange` calls; same underlying fix.", "right": "pull_request:44822"}, {"accept": true, "left": "pull_request:44753", "reason": "Same XLNet bug in the same function, with overlapping device-placement fixes; they are duplicate implementations of the same repair.", "right": "pull_request:44782"}, {"accept": true, "left": "pull_request:44753", "reason": "Despite the different title, the diff targets the same XLNet positional-encoding device-placement problem in the same code path.", "right": "pull_request:44762"}, {"accept": true, "left": "pull_request:44753", "reason": "Both address the same concrete XLNet `relative_positional_encoding` device-placement issue and could plausibly be merged into one PR.", "right": "pull_request:44824"}, {"accept": true, "left": "pull_request:44762", "reason": "Same underlying XLNet CPU/device-placement bug in `relative_positional_encoding`; the changes are functionally overlapping.", "right": "pull_request:44782"}, {"accept": true, "left": "pull_request:44762", "reason": "Both are fixes for the same XLNet positional-encoding device bug in the same file and code path.", "right": "pull_request:44822"}, {"accept": true, "left": "pull_request:44762", "reason": "Same concrete XLNet bug, same function, same class of device-placement edit; duplicate fix.", "right": "pull_request:44824"}, {"accept": true, "left": "pull_request:44782", "reason": "These are both the same XLNet `relative_positional_encoding` device-placement fix with only minor implementation differences.", "right": 
"pull_request:44822"}, {"accept": true, "left": "pull_request:44782", "reason": "Same exact bug and code path in XLNet; the PRs are alternative implementations of one fix.", "right": "pull_request:44824"}, {"accept": true, "left": "pull_request:44822", "reason": "Both PRs fix the same XLNet positional-encoding device-placement issue and are mergeable as one change.", "right": "pull_request:44824"}], "summary": "This cluster is dominated by one XLNet CPU/device-placement bug in `relative_positional_encoding`; one broader XLNet sweep is mixed in, but the core duplicate set is the narrow XLNet fixes. No issue artifacts are present. The merged PR `44782` is the best canonical representative."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44745|pull_request:44824", "pull_request:44745|pull_request:44762", "pull_request:44753|pull_request:44822", "pull_request:44753|pull_request:44782", "pull_request:44753|pull_request:44762", "pull_request:44753|pull_request:44824", "pull_request:44762|pull_request:44824", "pull_request:44762|pull_request:44782", "pull_request:44762|pull_request:44822", "pull_request:44782|pull_request:44822", "pull_request:44782|pull_request:44824", "pull_request:44822|pull_request:44824"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 1928, "estimated_input_tokens": 836, "item_count": 2, "node_count": 2, "serialized_chars": 3344, "soft_pair_count": 1}, "cached_at": "2026-04-14T18:56:37Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "28eac053bc589f290b66e5b28e3788208e3fc2dbc21b8c0571ee336dda7a713e", "prepared_review_unit_schema_version": "1.0"}, "nodes": 
["pull_request:44783", "pull_request:44819"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #44783 is the best representative because it addresses the core tokenizer resolution path rather than only updating the hub-class exception list.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #44783 is the more direct substantive fix: it maps deepseek_v2 and deepseek_v3 to LlamaTokenizer in AutoTokenizer, which appears to be the primary behavior change.", "confidence": 0.87, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44783", "reason": "Related DeepSeek tokenizer fixes, but they change different code-paths and are not the same underlying patch; sharing the same issue target is not enough to treat them as duplicates.", "right": "pull_request:44819"}], "summary": "Two related but distinct PRs against tokenization_auto.py for DeepSeek v2/v3: one adds the model types to AutoTokenizer mapping, the other marks them as incorrect hub tokenizer classes. 
They share an issue target but do not fix the exact same change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44783|pull_request:44819"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2442, "estimated_input_tokens": 1093, "item_count": 2, "node_count": 2, "serialized_chars": 4372, "soft_pair_count": 1}, "cached_at": "2026-04-14T18:57:27Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "5c04302bfd701809d6f8c6e1ac241329c93ddffa8cf64f443c2ce11e4ca7239d", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44793", "pull_request:44814"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #44814 best captures the shared bug class with the least extra surface area; #44793 is broader and includes an additional boi_token_id workaround.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #44814 is the cleaner representative: it is narrower, directly fixes None handling in Janus image generation, and is the closed/finalized variant in the cluster.", "confidence": 0.84, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44793", "reason": "Both touch Janus image generation and even share the expand_size None-guard, but they fix different null-value failures (boi_token_id vs max_length/cache len). That is related, not the same concrete bug, so do not merge as duplicates.", "right": "pull_request:44814"}], "summary": "Two Janus PRs target the same issue ticket and the same generate() path, but they patch different None-handling cases: one covers boi_token_id/generation_kwargs, the other covers max_length/cache sizing. 
Related, but not the same concrete fix."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44793|pull_request:44814"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 9628, "estimated_input_tokens": 4686, "item_count": 7, "node_count": 7, "serialized_chars": 18742, "soft_pair_count": 9}, "cached_at": "2026-04-14T18:57:53Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "7e3c8c9165abce6bea5daca96d635ae714e9ddcfe3db452a5dee61f76e050eb3", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44823", "pull_request:44826", "pull_request:44838", "pull_request:44842", "pull_request:44892", "pull_request:44946", "pull_request:45197"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 44892 is the best overall representative of the bug fix because it covers the concrete failure path most comprehensively and is the most likely single PR to subsume the earlier variants.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 44892 is the most complete fix for the shared URL-loading bug: it adds reusable URL helpers, wires them into image-processing and processing paths, and includes tests. 
It best represents the eventual code change family.", "confidence": 0.88, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:44823", "reason": "Both PRs target the same AutoImageProcessor.from_pretrained URL-input failure in image_processing_base; they differ mainly in implementation detail and test coverage, so they are duplicates of the same fix.", "right": "pull_request:44838"}, {"accept": true, "left": "pull_request:44823", "reason": "Both aim to enable AutoImageProcessor to load image processor data from a URL, and both modify the same loading path; the extra helper work in 44826 is still part of the same underlying fix.", "right": "pull_request:44826"}, {"accept": false, "left": "pull_request:44946", "reason": "These are unrelated doc changes: 44946 is a from_pretrained docstring cleanup, while 45197 updates Gemma 4 documentation content and examples.", "right": "pull_request:45197"}, {"accept": true, "left": "pull_request:44838", "reason": "Same underlying URL-loading bug for AutoImageProcessor; 44892 is a broader refactor of the same fix with shared helpers and tests, so the PRs could plausibly be merged into one.", "right": "pull_request:44892"}, {"accept": false, "left": "pull_request:44892", "reason": "44892 is a code fix for URL loading, while 44946 only updates documentation wording about deprecated URL input. They are not the same change.", "right": "pull_request:44946"}, {"accept": false, "left": "pull_request:44826", "reason": "44826 is a functional URL-loading implementation for AutoImageProcessor, whereas 44946 is documentation-only. They do not fix the same concrete code path.", "right": "pull_request:44946"}, {"accept": false, "left": "pull_request:44823", "reason": "44823 fixes the runtime URL-loading bug in image_processing_base/auto image processor; 44946 only adjusts docstrings. 
Different changes and not duplicates.", "right": "pull_request:44946"}, {"accept": true, "left": "pull_request:44842", "reason": "Both PRs address the same AutoImageProcessor URL-loading regression in image_processing_base; 44892 builds on the same helper-based approach and extends it, so they are the same underlying fix family.", "right": "pull_request:44892"}, {"accept": true, "left": "pull_request:44826", "reason": "Both PRs are about the same AutoImageProcessor URL-input bug and patch the same loading flow, even if they take different implementation routes; they could plausibly be combined into one PR.", "right": "pull_request:44838"}], "summary": "The cluster is mostly competing pull requests for the same AutoImageProcessor URL-loading bug (#44821), plus two unrelated doc-only PRs. The strongest representative is the broader implementation in PR 44892; the gemma4 docs PR is unrelated."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44823|pull_request:44838", "pull_request:44823|pull_request:44826", "pull_request:44946|pull_request:45197", "pull_request:44838|pull_request:44892", "pull_request:44892|pull_request:44946", "pull_request:44826|pull_request:44946", "pull_request:44823|pull_request:44946", "pull_request:44842|pull_request:44892", "pull_request:44826|pull_request:44838"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 5028, "estimated_input_tokens": 2386, "item_count": 4, "node_count": 4, "serialized_chars": 9543, "soft_pair_count": 6}, "cached_at": "2026-04-14T18:58:04Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": 
"2d1874319df462917849f9883b6c468ff85deddcac8bcb5103139eed42674425", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44885", "pull_request:44902", "pull_request:45006", "pull_request:45226"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 44902 is the best single PR to keep as canonical because it most directly names and targets the shared bug, while the others are near-equivalent variants of the same fix.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 44902 is the cleanest representative: it has the explicit issue target (44869), a precise title describing the OOB trailing-replacement-char bug, and a minimal focused fix in the exact affected code path.", "confidence": 0.97, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:44885", "reason": "Same Whisper timestamp decode path and same OOB guard fix; 44902 is just a slightly more explicit variant tied to issue 44869.", "right": "pull_request:44902"}, {"accept": true, "left": "pull_request:44885", "reason": "Both address the same trailing replacement-char IndexError in `_split_tokens_on_unicode`; the code changes are equivalent in effect.", "right": "pull_request:45006"}, {"accept": true, "left": "pull_request:44885", "reason": "Same bug, same file, same conditional bounds fix; only naming and guard syntax differ.", "right": "pull_request:45226"}, {"accept": true, "left": "pull_request:44902", "reason": "These are duplicate implementations of the same Whisper OOB fix for the same issue target and code path.", "right": "pull_request:45006"}, {"accept": true, "left": "pull_request:44902", "reason": "Both PRs solve the same trailing replacement character decoding bug in the same function with only minor refactoring differences.", "right": "pull_request:45226"}, {"accept": true, "left": "pull_request:45006", "reason": "Same underlying Whisper timestamp decoding bug and same concrete safeguard against out-of-range access; effectively 
mergeable into one PR.", "right": "pull_request:45226"}], "summary": "All four PRs patch the same Whisper `_split_tokens_on_unicode` out-of-bounds bug when a trailing replacement character is decoded. They differ only in small guard/refactor details and issue linkage, not in the underlying change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44885|pull_request:44902", "pull_request:44885|pull_request:45006", "pull_request:44885|pull_request:45226", "pull_request:44902|pull_request:45006", "pull_request:44902|pull_request:45226", "pull_request:45006|pull_request:45226"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 1874, "estimated_input_tokens": 809, "item_count": 2, "node_count": 2, "serialized_chars": 3233, "soft_pair_count": 1}, "cached_at": "2026-04-14T18:58:14Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "2e65866e028884b05d5fd5404f35cc9df103544ab68addb0205f87668229cc49", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44909", "pull_request:44932"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #44909 is the best canonical PR because it is merged, directly addresses the tracked issue, and represents the finalized version of the identical fix.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #44909 is the merged implementation of the fix, explicitly linked to issue #44908, and has the same code change as #44932.", "confidence": 0.99, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:44909", "reason": "Same concrete bug fix in the same file and same code path, with identical diff semantics; 
these are clear duplicates and could be merged into one PR.", "right": "pull_request:44932"}], "summary": "Both PRs make the same one-line fix in `src/transformers/optimization.py` for `SchedulerType.INVERSE_SQRT`: pass `scheduler_specific_kwargs` through to the scheduler call. They target the same issue and are effectively duplicates, with 44909 being the merged version."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44909|pull_request:44932"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2284, "estimated_input_tokens": 1014, "item_count": 2, "node_count": 2, "serialized_chars": 4056, "soft_pair_count": 1}, "cached_at": "2026-04-14T18:58:30Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "3d788346e703de3ed3a88f45a3b2cd85cd7854a7e25df8ec956423bacb4eba26", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44914", "pull_request:44915"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 44915 is the strongest standalone PR in the cluster: it changes the shared tokenizer path, has wider impact, and includes targeted test coverage.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 44915 is the better canonical candidate because it addresses the broader underlying tokenizer cleanup behavior in core code and tests, rather than a model-specific conversion default.", "confidence": 0.94, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44914", "reason": "Different scope and code path: 44914 only changes the Llama 3 tokenizer conversion default, while 44915 modifies PreTrainedTokenizerFast/BPE cleanup behavior 
globally. They are not the same underlying fix and would not plausibly merge into one PR.", "right": "pull_request:44915"}], "summary": "Both PRs touch tokenization cleanup, but they fix different problems: one is a Llama 3 conversion default, the other changes the global PreTrainedTokenizerFast/BPE cleanup behavior. They should not be deduplicated."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44914|pull_request:44915"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2432, "estimated_input_tokens": 1088, "item_count": 2, "node_count": 2, "serialized_chars": 4352, "soft_pair_count": 1}, "cached_at": "2026-04-14T18:59:07Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "04917151b08fd6c8cbee4045a51d987ff1e0b63fddc0043c9ee569be6b287758", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44917", "pull_request:44985"], "result": {"analyst_result": {"best_issue_reason": "No issue item is present in the cluster; #44913 is only referenced as an external tracker.", "best_pr_reason": "PR #44985 is the best representative because it fixes the same concrete code-path problem as #44917 and does so more completely across both config classes.", "canonical_issue_reason": "Issue #44913 is the shared explicit tracker for the underlying save/load rotary_pct bug, but no issue artifact is present in this cluster.", "canonical_pr_reason": "PR #44985 is the best canonical PR: it is merged, targets the same bug, and extends the fix to both GPTNeoX and GPTNeoX Japanese config paths.", "confidence": 0.98, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:44917", "reason": 
"Both PRs fix the same underlying rotary_pct save/load regression in GPTNeoX configs, and #44917 appears to be a narrower subset of the merged fix in #44985.", "right": "pull_request:44985"}], "summary": "Both PRs address the same GPTNeoX rotary_pct persistence bug tracked by issue #44913. PR #44985 is the more complete and merged fix; PR #44917 is an earlier, narrower variant of the same change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44917|pull_request:44985"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2406, "estimated_input_tokens": 1075, "item_count": 2, "node_count": 2, "serialized_chars": 4300, "soft_pair_count": 1}, "cached_at": "2026-04-14T18:59:19Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "badea7b8ed558cd8ad0534d931b16a97fee8eb931ce53dde324958b7598ae0e3", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44924", "pull_request:44940"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 44940 is the strongest standalone PR in the cluster: it has a specific bug description, touches the core initialization/model-loading path, and includes validation coverage.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 44940 is the better canonical representative because it addresses a concrete concurrency bug with a clear scope boundary and explicit tests, whereas PR 44924 is a separate, narrower runtime fix in a different subsystem.", "confidence": 0.98, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44924", "reason": "Different code paths and different bugs: 44924 changes CUDA graph 
capture mode for continuous batching thread safety; 44940 changes no-tie-weights scoping via ContextVar for concurrent model loading. They are not mergeable as one PR.", "right": "pull_request:44940"}], "summary": "These two PRs are unrelated: one fixes CUDA graph capture thread-safety in continuous batching, while the other fixes tie_weights skipping scope leaking across concurrent model loads. They do not share the same underlying bug or change path."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44924|pull_request:44940"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 5062, "estimated_input_tokens": 2403, "item_count": 4, "node_count": 4, "serialized_chars": 9612, "soft_pair_count": 4}, "cached_at": "2026-04-14T18:59:49Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "05c9fb3777cab8cfc3e2c9e5dd2c72fde62f1f7ff51567ed5dd5d3c738d3ad01", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44978", "pull_request:45015", "pull_request:45043", "pull_request:45089"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 45015 is the strongest standalone representative of the cluster because it covers both affected helpers in `modeling_utils.py`, adds tests, and has the clearest linkage to the tracked bug.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 45015 is the best canonical PR: it fixes the same `sys.modules` failure mode as the others, explicitly targets issue 45003, and includes tests for the behavior change. 
It is broader and more complete than the minimal code-only variants.", "confidence": 0.98, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:44978", "reason": "Same underlying bug and same code path in `modeling_utils.py`; 45015 is essentially a fuller version of 44978 with tests and an issue link.", "right": "pull_request:45015"}, {"accept": true, "left": "pull_request:45015", "reason": "Near-identical fix for the same `sys.modules` KeyError in the same functions and file; the difference is mostly presence of tests/wording.", "right": "pull_request:45089"}, {"accept": true, "left": "pull_request:45015", "reason": "Both PRs implement the same concrete fix in `modeling_utils.py` and are mergeable as one change; 45043 is just another variant with the same code-path correction.", "right": "pull_request:45043"}, {"accept": true, "left": "pull_request:45043", "reason": "They are the same bug fix in the same file with the same underlying patch pattern; one is just more complete with tests.", "right": "pull_request:45089"}], "summary": "All four PRs are closely related variants of the same bug fix in `modeling_utils.py`: guarding `sys.modules` access to avoid a `KeyError` when resolving attention/experts implementation. PR 45015 is the most complete and best canonical representative because it includes the fix plus tests and an explicit issue target."}, "error_kind": null, "error_message": null, "evaluator_result": {"accept": true, "feedback": "The summary is grounded in the packet: all four PRs touch the same `modeling_utils.py` bug path, and the pairwise acceptances are conservative enough for near-duplicate bugfix variants. 
The canonical choice of PR 45015 is also supported by its tests and explicit issue link."}, "evaluator_used": true, "retried": false}, "soft_pairs": ["pull_request:44978|pull_request:45015", "pull_request:45015|pull_request:45089", "pull_request:45015|pull_request:45043", "pull_request:45043|pull_request:45089"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2460, "estimated_input_tokens": 1102, "item_count": 2, "node_count": 2, "serialized_chars": 4408, "soft_pair_count": 1}, "cached_at": "2026-04-14T18:59:55Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "7b4d830c40352bb8e9dd7eee9064d59b7723631f2f9058df2faba01d26edf744", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44979", "pull_request:45363"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #45363 is the best single representative because it covers the main user-facing fusion workflow and supporting integration code, whereas #44979 is a lower-level API addition focused on generic module composition.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #45363 is the broader, more end-to-end change: it introduces `KernelConfig`, integration plumbing, and a concrete fused-model example. It is the stronger representative of the kernel-fusion feature set in this cluster.", "confidence": 0.9, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44979", "reason": "Both are about fusion, but #44979 adds a standalone module-fusion API in `src/transformers/module_fusion.py`, while #45363 implements `KernelConfig`-driven n-to-1 kernel fusion and integration plumbing. 
Different abstractions, different code paths, not the same concrete change.", "right": "pull_request:45363"}], "summary": "These PRs are conceptually related to model/module fusion, but they target different layers and code paths: one adds a generic module-fusion API, while the other wires up kernel-based n-to-1 fusion via `KernelConfig` and integration hooks. They do not look like duplicates or candidates to merge into one PR."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44979|pull_request:45363"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 3116, "estimated_input_tokens": 1430, "item_count": 2, "node_count": 2, "serialized_chars": 5718, "soft_pair_count": 1}, "cached_at": "2026-04-14T19:00:18Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "2af3b3011cddc5e1bad9a4a82e2d2b9ebac96ca7461ba6fa94ccdd924cb2c292", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44996", "pull_request:45028"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #45028 is the best overall fit for the cluster topic: it most comprehensively represents the ongoing distributed/TP refactor work and has the broadest scope among the two.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #45028 is the better canonical representative because it is the larger, more central refactor around TP + FSDP integration and includes the broader set of changes/tests in this area.", "confidence": 0.89, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44996", "reason": "They overlap in distributed/tensor-parallel infrastructure, but #44996 is a 
narrower merged from_pretrained refactor while #45028 is a broader TP/FSDP integration refactor with additional model/config changes. Same subsystem, not the same concrete change.", "right": "pull_request:45028"}], "summary": "These are related but not duplicates: both PRs touch distributed/tensor-parallel refactoring, but #45028 is a broader, still-open TP/FSDP integration refactor while #44996 is a smaller merged refactor focused on from_pretrained distributed loading. They share some files, but the concrete change sets and scope are different."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44996|pull_request:45028"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 5868, "estimated_input_tokens": 2806, "item_count": 3, "node_count": 3, "serialized_chars": 11222, "soft_pair_count": 3}, "cached_at": "2026-04-14T19:00:24Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "db5f8f952a6fe438c54606db06942ab737b88a96b1ff5820288756a05b37b40f", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44997", "pull_request:44999", "pull_request:45133"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #45133 is the strongest global representative: it is open, appears to be the most current iteration, and covers the same concrete model implementation/files as the earlier closed drafts.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #45133 is the best canonical PR because it is the latest open revision of the same Sarvam model addition, and the file set/title matches the earlier attempts closely.", "confidence": 0.97, 
"soft_edge_verdicts": [{"accept": true, "left": "pull_request:44997", "reason": "Same title, same four Sarvam files, and both are draft model-addition PRs for the same underlying implementation.", "right": "pull_request:44999"}, {"accept": true, "left": "pull_request:44997", "reason": "Same Sarvam model addition with the same file targets; the later PR is clearly an updated revision of the same change.", "right": "pull_request:45133"}, {"accept": true, "left": "pull_request:44999", "reason": "Same concrete model implementation and same touched files, differing only by revision completeness.", "right": "pull_request:45133"}], "summary": "All three pull requests are the same Sarvam model-addition effort, touching the same generated/model files with near-identical scope. The open PR is the best canonical representative, while the two closed drafts are duplicate attempts/earlier revisions of the same change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44997|pull_request:44999", "pull_request:44997|pull_request:45133", "pull_request:44999|pull_request:45133"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 3682, "estimated_input_tokens": 1713, "item_count": 2, "node_count": 2, "serialized_chars": 6850, "soft_pair_count": 1}, "cached_at": "2026-04-14T19:00:33Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "df592a1ac5fffc040954f0b704338fd56909c29fa0fc426a12e37801d28a20c7", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:45010", "pull_request:45077"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #45010 is the better cluster 
representative because it is the broader, more complete variant of this workflow-security cleanup; #45077 looks like a smaller follow-up with a different exact fix set.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #45010 is the more substantial and representative workflow-hardening change: it pins many more actions and fixes multiple unsafe expressions across a broad set of workflow files.", "confidence": 0.9, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:45010", "reason": "Same general theme, but not the same concrete change: #45010 pins 69 actions and extracts 2 unsafe expressions, while #45077 pins 50 actions and extracts 1 secret. They touch overlapping workflows, but the underlying fixes are different and not mergeable into one identical PR.", "right": "pull_request:45077"}], "summary": "These are related workflow-hardening PRs, but they are not duplicates: both pin unpinned GitHub Actions and move values into env vars, yet they target different sets of workflow files and different unsafe expressions/secrets with different scope and size."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:45010|pull_request:45077"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2828, "estimated_input_tokens": 1286, "item_count": 2, "node_count": 2, "serialized_chars": 5142, "soft_pair_count": 1}, "cached_at": "2026-04-14T19:00:53Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "d175ed1e6defa50e1f502227bcbeafedb341dc80efeacdeb026bd2f5c18710a3", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:45066", "pull_request:45076"], "result": 
{"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #45076 is the better representative of the cluster because it covers the broader end-state of the CLI feature set and includes more supporting code paths and tests.", "canonical_issue_reason": null, "canonical_pr_reason": "Pick PR #45076 as canonical because it looks broader and more complete: it adds the hardware-aware advisor plus extra hardware utilities, whereas #45066 appears to be an earlier/narrower advisor + cleanup variant.", "confidence": 0.69, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:45066", "reason": "Do not merge as duplicates: both touch similar CLI files, but the titles and file sets suggest related enhancements with different scopes (legacy cleanup vs hardware-aware advisor/selective weight surgery), not one identical bug fix or change.", "right": "pull_request:45076"}], "summary": "Two overlapping Transformers CLI enhancement PRs, both centered on a new advisor/compress-related workflow, but they appear to be different feature sets rather than the same concrete change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:45066|pull_request:45076"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2328, "estimated_input_tokens": 1036, "item_count": 2, "node_count": 2, "serialized_chars": 4144, "soft_pair_count": 1}, "cached_at": "2026-04-14T19:01:10Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "8d660862bc33781fb8ef86bfe2679671af11310aee1ccf213f24cd2d3638462a", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:45080", "pull_request:45129"], "result": 
{"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #45080 is the best representative of the underlying bug fix because it resolves the Pydantic compatibility problem at the class-schema level rather than by loosening a single field annotation.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #45080 is the more direct root-cause fix: it teaches Pydantic to treat `PreTrainedConfig` as an instance type instead of introspecting its dataclass fields, which addresses the schema-generation failure broadly.", "confidence": 0.81, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:45080", "reason": "Both PRs fix the same concrete bug: Pydantic failing to generate a schema for `PreTrainedConfig` used as a field type. They touch the same class and same code path, and could plausibly be combined into one patch.", "right": "pull_request:45129"}], "summary": "Both PRs target the same Pydantic schema-generation failure for `PreTrainedConfig` after the dataclass conversion, using different implementation strategies on the same class/file."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:45080|pull_request:45129"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2464, "estimated_input_tokens": 1104, "item_count": 2, "node_count": 2, "serialized_chars": 4414, "soft_pair_count": 1}, "cached_at": "2026-04-14T19:01:44Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "a44b7bda9b2a24a352f297b0df8f17b210be96ee514f515c6404d46bf2c5b2f5", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:45086", "pull_request:45317"], "result": {"analyst_result": 
{"best_issue_reason": "`#45081` is the best issue node because both PRs directly target it and the titles/diffs align with that bug report.", "best_pr_reason": "`#45317` is the best PR node because it is the fuller implementation of the same change, with matching code edits and added tests.", "canonical_issue_reason": "Issue `#45081` is the shared explicit target for both PRs and is the underlying bug report they address.", "canonical_pr_reason": "PR `#45317` is the better canonical PR because it contains the same code fix as `#45086` plus test coverage, making it the more complete patch.", "confidence": 0.98, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:45086", "reason": "Same concrete bug fix, same affected file and identical code hunk; `45317` appears to be the later, test-backed version of `45086`.", "right": "pull_request:45317"}], "summary": "Both PRs implement the same fix for the same AttributeError in `_patch_mistral_regex` by changing the pre-tokenizer access in `tokenization_utils_tokenizers.py`; `45317` is the more complete version because it adds tests."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:45086|pull_request:45317"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 3526, "estimated_input_tokens": 1635, "item_count": 3, "node_count": 3, "serialized_chars": 6540, "soft_pair_count": 3}, "cached_at": "2026-04-14T19:03:27Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "dd695f5c28b3e8a9c95f283b64f1885904d613da1952fb7e14f393ec8632b67a", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:45087", "pull_request:45119", 
"pull_request:45240"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "#45240 is the best single PR to represent the cluster since it is merged and solves the same underlying type-checking issue in the same code path, while the earlier PRs explore different approaches.", "canonical_issue_reason": null, "canonical_pr_reason": "#45240 is the strongest PR representative because it is merged and directly addresses the shared mypy/type-checking problem in the target file, but it should be treated as one implementation among several rather than a proven final form.", "confidence": 0.78, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:45087", "reason": "Both hit the same issue target and file, but one uses a TYPE_CHECKING early return while the other preserves __signature__; these are different implementation strategies, not clearly the same change or safely mergeable into one PR.", "right": "pull_request:45119"}, {"accept": false, "left": "pull_request:45119", "reason": "They address the same bug area, but #45119 and #45240 fix it with different mechanisms (__signature__ preservation vs dataclass_transform on the class). That is close, but not enough to call them duplicates of the same concrete change.", "right": "pull_request:45240"}, {"accept": false, "left": "pull_request:45087", "reason": "Both relate to the same type-checking issue in the same file, but the fixes are materially different and would require choosing between competing implementations, so they should not be merged as one soft-edge duplicate.", "right": "pull_request:45240"}], "summary": "All three PRs target the same issue (#45071) and the same file, src/transformers/configuration_utils.py, but they use different type-checking fix strategies. 
The packet supports grouping them by shared bug area, not treating them as clearly equivalent or safely mergeable into one change."}, "error_kind": null, "error_message": null, "evaluator_result": {"accept": false, "feedback": "The packet only supports a conservative statement that all three PRs relate to the same issue target and file. It does not support calling #45119 a \u201cnear-identical\u201d earlier implementation of #45240 or #45240 the \u201cfinal accepted form\u201d of #45119; the diffs show different fix strategies (TYPE_CHECKING early return, __signature__ preservation, and dataclass_transform on the class). The soft-edge accepts are too broad because the changes are not clearly mergeable into one single PR without choosing among competing implementations. The summary should be narrowed to shared issue/file, with uncertainty about equivalence and mergeability."}, "evaluator_used": true, "retried": true}, "soft_pairs": ["pull_request:45087|pull_request:45119", "pull_request:45119|pull_request:45240", "pull_request:45087|pull_request:45240"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2478, "estimated_input_tokens": 1111, "item_count": 2, "node_count": 2, "serialized_chars": 4442, "soft_pair_count": 1}, "cached_at": "2026-04-14T19:03:56Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "53c2d7295e4a14758baecab74d6213350bda0f2c10e75fac1d2ff7038b0644d8", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:45104", "pull_request:45105"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 45105 is the best overall PR candidate: same concrete bug, same code path, and it includes tests, so it is the strongest 
representative of the fix.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 45105 is the better canonical PR because it addresses the same crash with a fuller implementation and adds regression coverage in tests, making it the more complete fix for the shared issue.", "confidence": 0.96, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:45104", "reason": "Same underlying crash in `src/transformers/utils/auto_docstring.py` caused by string annotations under future annotations; PR 45105 is a fuller version of the same fix and could plausibly subsume 45104.", "right": "pull_request:45105"}], "summary": "Both PRs target the same auto_docstring failure when `from __future__ import annotations` leaves kwarg annotations as strings in `_process_kwargs_parameters`; PR 45105 is the more complete version with tests, while 45104 is a minimal precursor/fix attempt."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:45104|pull_request:45105"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 1838, "estimated_input_tokens": 791, "item_count": 2, "node_count": 2, "serialized_chars": 3161, "soft_pair_count": 1}, "cached_at": "2026-04-14T19:04:13Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "60cc5d76b19b8a92b2ed872c5f47bc0ee50e0fcf4b6db832033d96c042a2d119", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:45107", "pull_request:45108"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "No PR is a strong global representative for the cluster because the two changes are unrelated and not mergeable into one fix.", 
"canonical_issue_reason": null, "canonical_pr_reason": "No single canonical PR: these land in different files, different subsystems, and fix different underlying problems.", "confidence": 0.98, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:45107", "reason": "Reject: the PRs fix different issues in different code paths (`text_to_audio` config merging vs. `Wav2Vec2Config` typing). They are not the same underlying bug or change.", "right": "pull_request:45108"}], "summary": "The two PRs are unrelated: one patches a text-to-audio pipeline crash caused by `None` values in generation config, while the other relaxes `Wav2Vec2Config.vocab_size` typing to allow `None`. They should not be clustered as duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:45107|pull_request:45108"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2676, "estimated_input_tokens": 1210, "item_count": 2, "node_count": 2, "serialized_chars": 4840, "soft_pair_count": 1}, "cached_at": "2026-04-14T19:04:39Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "d852c17b317f2bf22e7c12ca32bd8105ac811a228ee05c78f88558260dfa2b3e", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:45158", "pull_request:45159"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #45158 is the best overall representative of the change set: it is merged and covers the core Turkish docs addition, while #45159 is a similar follow-up/alternate PR with extra workflow edits.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #45158 is the better canonical PR because it is 
already merged and cleanly represents the Turkish Get Started translation work.", "confidence": 0.92, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:45158", "reason": "Accept: same underlying Turkish Get Started documentation addition, with substantial overlap in the docs files and workflow changes; the extra PR-doc workflow tweak in #45159 does not change the core fact that these are the same feature and could plausibly be merged into one PR.", "right": "pull_request:45159"}], "summary": "Both PRs are about adding Turkish documentation for the Get Started section and touch the same Turkish docs files, so they look like near-duplicate implementations of the same feature."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:45158|pull_request:45159"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2354, "estimated_input_tokens": 1049, "item_count": 2, "node_count": 2, "serialized_chars": 4193, "soft_pair_count": 1}, "cached_at": "2026-04-14T19:04:49Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "e82983c99c061de532638d4e49b46b5253a8eda159e6cf8f733233391b8868f3", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:45266", "pull_request:45267"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:45266", "reason": "Reject: these are different PRs in different model files (ALBERT vs DistilBERT) with no shared concrete bug or code path; the overlap is just generic 
refactoring/docstring/style edits.", "right": "pull_request:45267"}], "summary": "Two pull requests with superficial similarity (docstring/formatting cleanup) but they touch different model implementations and do not appear to fix the same underlying change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:45266|pull_request:45267"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2766, "estimated_input_tokens": 1255, "item_count": 2, "node_count": 2, "serialized_chars": 5019, "soft_pair_count": 1}, "cached_at": "2026-04-14T19:04:55Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "8419e8627bf2768a63a74d0a53795c27a4d731a7290d0c57464074839e3984d3", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:45314", "pull_request:45361"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "#45361 is the strongest standalone PR in this set: it has the clearer current scope, explicit conversion-mapping additions, and more discussion activity.", "canonical_issue_reason": null, "canonical_pr_reason": "#45361 is the better representative of the cluster because it is the more active and broader conversion-mapping change, while #45314 is a narrower, closed follow-up in a different model family.", "confidence": 0.93, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:45314", "reason": "Do not merge: #45314 changes text-model checkpoint conversion for Gemma/Qwen LLM loading, whereas #45361 adds CLIP-like vision-model conversion rules for VLMs. 
Same file, different code paths and different model families.", "right": "pull_request:45361"}], "summary": "These PRs both touch `conversion_mapping.py`, but they address different conversion paths: #45314 is about loading LLM classes from VLM checkpoints (Gemma/Qwen text mappings), while #45361 adds CLIP-like vision-model conversions for VLMs. Shared subsystem, different concrete behavior, so they are not duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:45314|pull_request:45361"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2618, "estimated_input_tokens": 1181, "item_count": 2, "node_count": 2, "serialized_chars": 4721, "soft_pair_count": 1}, "cached_at": "2026-04-14T19:06:15Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "fe5b3903cdc6c7604baa6e34661b522132d7fef6af30e342d112d379638dc92f", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:45379", "pull_request:45380"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "45380 is the best PR to keep: it is the more complete implementation of the fix and directly targets the shared issue.", "canonical_issue_reason": null, "canonical_pr_reason": "45380 is the stronger canonical PR because it covers the same field addition on both Qwen3_5VisionConfig and Qwen3_5MoeVisionConfig, whereas 45379 only patches the MoE config.", "confidence": 0.82, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:45379", "reason": "Accept: both PRs fix the same underlying `deepstack_visual_indexes` config-dropping problem for Qwen3.5 vision models, and 45379 looks like a narrower 
subset of 45380.", "right": "pull_request:45380"}], "summary": "Both PRs address the same Qwen3.5 vision-config serialization gap around `deepstack_visual_indexes`. PR 45380 is the broader, more complete fix and appears to subsume 45379's narrower MoE-only change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:45379|pull_request:45380"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2664, "estimated_input_tokens": 1204, "item_count": 2, "node_count": 2, "serialized_chars": 4816, "soft_pair_count": 1}, "cached_at": "2026-04-14T19:06:26Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "1e5b425ab48c5be5e9ae5e52620ded68dc1df2d7acd94031928326e2c9341b93", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:45395", "pull_request:45414"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #45414 is merged, has the same issue target and file set as #45395, and represents the final accepted change.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #45414 is the merged instance of the duplicated fix and is the best canonical PR to keep.", "confidence": 0.99, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:45395", "reason": "Same title, same explicit issue target (#45137), same touched files, and effectively the same code-path fix; these could plausibly be merged into one PR.", "right": "pull_request:45414"}], "summary": "These are duplicate pull requests for the same fix: both target issue #45137, touch the same files, and have the same title and near-identical diff scope. 
PR #45414 is the merged canonical PR; #45395 is the redundant unmerged duplicate."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:45395|pull_request:45414"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 1496, "estimated_input_tokens": 620, "item_count": 3, "node_count": 3, "serialized_chars": 2477, "soft_pair_count": 2}, "cached_at": "2026-04-14T20:10:41Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "8de19dda1341c388e2af84bd6b0c83c22e7cc0b4f3a2b26a4db2a5f7d7b6a612", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:13244", "issue:15980", "issue:16618"], "result": {"analyst_result": {"best_issue_reason": "#16618 is the best single issue to keep as the cluster representative because it is a direct failure-to-load report, but it should not subsume the other two as duplicates.", "best_pr_reason": null, "canonical_issue_reason": "Issue #16618 is the most concrete user-facing model-loading report, but it is not a true duplicate of the others; it is only the closest representative of the cluster by topic breadth.", "canonical_pr_reason": null, "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "issue:13244", "reason": "Different bugs: TAPAS tokenization parity with TensorFlow code versus inability to load bert-base-uncased. No shared code path or concrete fix overlap is evident.", "right": "issue:16618"}, {"accept": false, "left": "issue:15980", "reason": "Different failure modes: private-model download/authentication error message versus loading a public model. 
Related to model loading broadly, but not the same underlying bug.", "right": "issue:16618"}], "summary": "These three issues are not duplicates: one is about TAPAS tokenization mismatch, one about authentication/error messaging when downloading a private model, and one about failing to load bert-base-uncased. They share only broad model-loading/library context."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:13244|issue:16618", "issue:15980|issue:16618"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7822, "estimated_input_tokens": 3783, "item_count": 18, "node_count": 18, "serialized_chars": 15130, "soft_pair_count": 16}, "cached_at": "2026-04-14T20:10:58Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "6f39acfd230557b8a448c89376608a8b1e6dc0c48ed8dd76df67f1fc0725fcc6", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:30277", "issue:34634", "issue:35545", "issue:36010", "issue:41720", "issue:42915", "issue:43596", "issue:43638", "issue:43716", "issue:43828", "issue:44292", "issue:44509", "issue:44512", "issue:44661", "issue:44805", "issue:44918", "issue:45161", "issue:45237"], "result": {"analyst_result": {"best_issue_reason": "issue:43596 is the best representative of the only accepted duplicate pair and has the most precise title for the underlying bug.", "best_pr_reason": null, "canonical_issue_reason": "issue:43596 is the clearest, most specific description of the shared BertModel + ZeRO-3 index-out-of-bounds bug; issue:43638 appears to be the same underlying report with slightly different wording.", "canonical_pr_reason": null, "confidence": 0.91, 
"soft_edge_verdicts": [{"accept": false, "left": "issue:43716", "reason": "Different models and different failure modes: image-preprocessor dtype mismatch vs AMD GPU runtime failure.", "right": "issue:45237"}, {"accept": false, "left": "issue:43828", "reason": "Both mention dtype issues, but they describe different model/hardware contexts and do not look like the same bug.", "right": "issue:45237"}, {"accept": false, "left": "issue:44292", "reason": "Different model and likely different backend/runtime problem; not the same concrete bug.", "right": "issue:45237"}, {"accept": false, "left": "issue:30277", "reason": "Unrelated failures: distributed collective mismatch during ZeRO-3 vs ONNX export error.", "right": "issue:35545"}, {"accept": false, "left": "issue:35545", "reason": "Different models and different code paths: ONNX export vs FineGrainedFP8Config runtime failure.", "right": "issue:42915"}, {"accept": false, "left": "issue:35545", "reason": "ModernBERT ONNX export error and Qwen3 device-mapping CUDA assert are unrelated.", "right": "issue:41720"}, {"accept": false, "left": "issue:44509", "reason": "Both are docs regressions, but they concern different removed commands and do not appear to be the same change.", "right": "issue:44512"}, {"accept": false, "left": "issue:43638", "reason": "Both are IndexError reports, but the BertModel ZeRO-3 init issue and the mask-shape mismatch are different failures.", "right": "issue:44805"}, {"accept": false, "left": "issue:35545", "reason": "ONNX export failure and GenerationMixin import failure are unrelated.", "right": "issue:36010"}, {"accept": false, "left": "issue:43596", "reason": "Different error messages and likely different code paths; not the same underlying bug.", "right": "issue:44805"}, {"accept": false, "left": "issue:44805", "reason": "Different model families and failure modes; no clear shared bug.", "right": "issue:45161"}, {"accept": false, "left": "issue:44805", "reason": "Mask-shape IndexError and 
Qwen3.5 embedding unpacking failure are different issues.", "right": "issue:44918"}, {"accept": false, "left": "issue:44661", "reason": "Tokenizer mapping failure while adding a model is unrelated to the mask/tensor IndexError.", "right": "issue:44805"}, {"accept": false, "left": "issue:30277", "reason": "Collective mismatch during distributed training is not the same as the BertModel ZeRO-3 index error.", "right": "issue:43638"}, {"accept": false, "left": "issue:34634", "reason": "BarkProcessor voice_preset bug and ModernBERT export error are unrelated.", "right": "issue:35545"}, {"accept": true, "left": "issue:43596", "reason": "Same BertModel + DeepSpeed ZeRO-3 index-out-of-bounds failure, described with only minor wording differences.", "right": "issue:43638"}], "summary": "Mostly unrelated issue set; the only clear duplicate pair is the two BertModel + DeepSpeed ZeRO-3 index-out-of-bounds reports."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43716|issue:45237", "issue:43828|issue:45237", "issue:44292|issue:45237", "issue:30277|issue:35545", "issue:35545|issue:42915", "issue:35545|issue:41720", "issue:44509|issue:44512", "issue:43638|issue:44805", "issue:35545|issue:36010", "issue:43596|issue:44805", "issue:44805|issue:45161", "issue:44805|issue:44918", "issue:44661|issue:44805", "issue:30277|issue:43638", "issue:34634|issue:35545", "issue:43596|issue:43638"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7102, "estimated_input_tokens": 3423, "item_count": 17, "node_count": 17, "serialized_chars": 13690, "soft_pair_count": 12}, "cached_at": "2026-04-14T20:11:13Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", 
"prepared_review_unit_hash": "2abf7274e9b185445a9106749fd14476c822dd52d8b737d46e71333ab8df5e3d", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:24643", "issue:30277", "issue:34928", "issue:36331", "issue:39290", "issue:43039", "issue:43425", "issue:43596", "issue:43638", "issue:43828", "issue:43975", "issue:44292", "issue:44457", "issue:44805", "issue:45081", "issue:45161", "issue:45237"], "result": {"analyst_result": {"best_issue_reason": "Issue 43596 is the strongest global issue choice because it is the cleanest, broadest formulation of the only closely related bug in the set and is more canonical than the more specific 43638.", "best_pr_reason": null, "canonical_issue_reason": "Issue 43596 is the best representative of the only strong duplicate-like subcluster: it states the general DeepSpeed ZeRO-3 + BERT IndexError clearly, while 43638 is a narrower variant of the same report.", "canonical_pr_reason": null, "confidence": 0.67, "soft_edge_verdicts": [{"accept": false, "left": "issue:43975", "reason": "Different bugs: one is incorrect detokenization for a specific model, the other is LoRA merge/save/reload output drift.", "right": "issue:44457"}, {"accept": false, "left": "issue:43039", "reason": "Both mention model/runtime behavior, but the failures are unrelated: Liger cross-entropy dispatch vs GPT-OSS tensor-parallel support.", "right": "issue:45161"}, {"accept": false, "left": "issue:44292", "reason": "Qwen-3 NVFP4 runtime failure and LoRA merge/reload mismatch are different code paths and symptoms.", "right": "issue:44457"}, {"accept": false, "left": "issue:34928", "reason": "Both are shape-related errors, but one is FSDP + activation checkpointing recomputation mismatch and the other is a mask/indexing shape error; not the same underlying bug.", "right": "issue:44805"}, {"accept": false, "left": "issue:24643", "reason": "Both involve DeepSpeed, but the concrete failures differ: 2-D weight requirement vs collective mismatch on ranks.", 
"right": "issue:30277"}, {"accept": false, "left": "issue:43975", "reason": "Detokenization bug for DeepSeek coder is unrelated to GPT-OSS-20B failing on AMD GPUs.", "right": "issue:45237"}, {"accept": false, "left": "issue:43638", "reason": "Different issues: DeepSpeed ZeRO-3/BERT IndexError vs GPT-OSS MoE tensor-parallel not working.", "right": "issue:45161"}, {"accept": false, "left": "issue:34928", "reason": "Activation checkpointing/FSDP tensor-size mismatch is unrelated to the Trainer compute_loss signature TypeError.", "right": "issue:36331"}, {"accept": false, "left": "issue:43596", "reason": "DeepSpeed ZeRO-3/BERT init IndexError and GPT-OSS MoE TP failure are unrelated.", "right": "issue:45161"}, {"accept": false, "left": "issue:39290", "reason": "Both are tokenizer/config-related crashes, but one is a Gemma3 vLLM config attribute error and the other is a Mistral regex patch backend_tokenizer attribute error.", "right": "issue:45081"}, {"accept": false, "left": "issue:43828", "reason": "Autocast dtype mismatch for Phi-tiny-MoE is not the same bug as GPT-OSS MoE tensor-parallel failure.", "right": "issue:45161"}, {"accept": false, "left": "issue:43425", "reason": "Torch version incompatibility is too broad and does not match the specific Qwen-3 NVFP4 runtime failure.", "right": "issue:44292"}], "summary": "This cluster is mostly a grab-bag of unrelated training/runtime issues across different models and backends. 
The only clearly near-duplicate subcluster is the DeepSpeed ZeRO-3 + BERT IndexError pair (43596/43638); the rest should stay separate."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43975|issue:44457", "issue:43039|issue:45161", "issue:44292|issue:44457", "issue:34928|issue:44805", "issue:24643|issue:30277", "issue:43975|issue:45237", "issue:43638|issue:45161", "issue:34928|issue:36331", "issue:43596|issue:45161", "issue:39290|issue:45081", "issue:43828|issue:45161", "issue:43425|issue:44292"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7778, "estimated_input_tokens": 3761, "item_count": 18, "node_count": 18, "serialized_chars": 15044, "soft_pair_count": 15}, "cached_at": "2026-04-14T20:11:25Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "c3f424d68a8ed25c30a371ba3ddc07aed224186b77b105edea82a8d06a7a5b89", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:30277", "issue:34928", "issue:36010", "issue:36331", "issue:41720", "issue:41762", "issue:43039", "issue:43541", "issue:43638", "issue:43716", "issue:43827", "issue:43828", "issue:44292", "issue:44512", "issue:44560", "issue:44805", "issue:44918", "issue:45161"], "result": {"analyst_result": {"best_issue_reason": "No single issue is a good representative for deduping this cluster because the overlap is mostly superficial.", "best_pr_reason": null, "canonical_issue_reason": "No clear canonical issue: the items span unrelated bugs and docs regressions across different models, parallelism modes, and subsystems.", "canonical_pr_reason": null, "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": 
"issue:43541", "reason": "Different models and failures: torch dynamo grouped_mm tracing error vs image preprocessor/model dtype mismatch.", "right": "issue:43716"}, {"accept": false, "left": "issue:44918", "reason": "Both mention Qwen3/embedding-related behavior, but one is an SFT trainer unpacking issue and the other is a TP-only MoE problem.", "right": "issue:45161"}, {"accept": false, "left": "issue:34928", "reason": "Different root causes and symptoms: activation checkpointing/FSDP tensor recomputation vs Qwen3 auto device map cuda assert.", "right": "issue:41720"}, {"accept": false, "left": "issue:43828", "reason": "One is an autocast dtype mismatch on Phi-tiny-MoE, the other is a Qwen-3 NVFP4 runtime error; not the same bug.", "right": "issue:44292"}, {"accept": false, "left": "issue:43716", "reason": "Both involve model execution errors, but the models and failure modes differ substantially (dtype mismatch vs NVFP4 runtime error).", "right": "issue:44292"}, {"accept": false, "left": "issue:44292", "reason": "Different code paths: NVFP4 model execution error vs TRL SFT embedding unpacking issue.", "right": "issue:44918"}, {"accept": false, "left": "issue:43039", "reason": "One is about Liger Kernel cross_entropy dispatch; the other is TRL SFT input embedding unpacking.", "right": "issue:44918"}, {"accept": false, "left": "issue:41762", "reason": "Both hit ZeRO-3 loading, but the concrete failures differ: Gemma3 loading index error vs non-pretrained Bert training index error.", "right": "issue:43638"}, {"accept": false, "left": "issue:43827", "reason": "Same general docs/v5 theme, but different removed command references and likely different documentation locations.", "right": "issue:44512"}, {"accept": false, "left": "issue:43716", "reason": "Different issues: image preprocessor/model dtype mismatch vs Qwen3-vl video StopIteration.", "right": "issue:44560"}, {"accept": false, "left": "issue:43716", "reason": "Dtype mismatch on a vision-language model is not 
the same as Qwen3.5 embedding unpacking with TRL SFT.", "right": "issue:44918"}, {"accept": false, "left": "issue:43039", "reason": "Liger Kernel cross_entropy routing and autocast dtype mismatch are related only at a high level, not the same concrete bug.", "right": "issue:43828"}, {"accept": false, "left": "issue:30277", "reason": "Both involve training-time runtime errors, but one is collective mismatch with ZeRO-3 and the other is a Trainer API signature break.", "right": "issue:36331"}, {"accept": false, "left": "issue:43039", "reason": "Cross-entropy dispatch with Liger Kernel is unrelated to a mask/index shape mismatch error.", "right": "issue:44805"}, {"accept": false, "left": "issue:34928", "reason": "Activation checkpointing/FSDP recomputation issue is unrelated to the GenerationMixin import error.", "right": "issue:36010"}], "summary": "This cluster is heterogeneous and looks like a set of unrelated issues that were scored as superficially similar by text overlap. The soft pairs mostly share broad keywords (dtype mismatch, ZeRO-3, Qwen, docs, etc.) 
but not the same concrete failure path, model, or fix target."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43541|issue:43716", "issue:44918|issue:45161", "issue:34928|issue:41720", "issue:43828|issue:44292", "issue:43716|issue:44292", "issue:44292|issue:44918", "issue:43039|issue:44918", "issue:41762|issue:43638", "issue:43827|issue:44512", "issue:43716|issue:44560", "issue:43716|issue:44918", "issue:43039|issue:43828", "issue:30277|issue:36331", "issue:43039|issue:44805", "issue:34928|issue:36010"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7122, "estimated_input_tokens": 3433, "item_count": 18, "node_count": 18, "serialized_chars": 13732, "soft_pair_count": 11}, "cached_at": "2026-04-14T20:11:54Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "bc69598399edeb0d855bea951a075d66542aad04196100a8f766cd766eb709ca", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:39290", "issue:41093", "issue:41720", "issue:41762", "issue:43039", "issue:43404", "issue:43425", "issue:43531", "issue:43596", "issue:43638", "issue:43792", "issue:43854", "issue:43866", "issue:43901", "issue:44512", "issue:44560", "issue:44863", "issue:45070"], "result": {"analyst_result": {"best_issue_reason": "#45070 is the best issue to keep as the representative ticket because it is a current, core-framework regression rather than a narrow model-specific or docs-only report. 
It is the least specific and most likely to subsume nearby config-validation regressions.", "best_pr_reason": null, "canonical_issue_reason": "If forced to pick one issue as the cluster anchor, #45070 is the broadest and most general regression report (core PretrainedConfig/pydantic breakage) and is more representative than the many model-specific tickets. This is still a weak canonical choice because the cluster is not truly cohesive.", "canonical_pr_reason": null, "confidence": 0.78, "soft_edge_verdicts": [{"accept": false, "left": "issue:41762", "reason": "Both are IndexError reports under DeepSpeed ZeRO-3, but they involve different models and different code paths (Gemma3 loading vs BertModel init). Too weak to treat as the same bug.", "right": "issue:43596"}, {"accept": false, "left": "issue:43901", "reason": "Both are documentation-related, but they concern different docs and different behavior changes. Not the same underlying issue.", "right": "issue:44512"}, {"accept": false, "left": "issue:39290", "reason": "Different model families and failures: Gemma3 config attribute error vs Qwen3 auto device mapping cudaErrorAssert. No concrete shared bug.", "right": "issue:41720"}, {"accept": false, "left": "issue:43039", "reason": "Liger Kernel cross_entropy routing is unrelated to a Qwen3-vl-embedding video StopIteration failure. Different subsystems and failure modes.", "right": "issue:44560"}, {"accept": false, "left": "issue:43866", "reason": "Both are loading-related, but one reports a corrupted Ovis2 checkpoint and the other a NemotronH implementation/checkpoint loading problem. 
Not the same issue.", "right": "issue:44863"}, {"accept": false, "left": "issue:43854", "reason": "Different models and different root causes: GLM-4.7-Flash test loading failure vs Ovis2 checkpoint corruption.", "right": "issue:43866"}, {"accept": false, "left": "issue:43425", "reason": "Torch 2.10 incompatibility is a version/support issue, while #45070 is a PretrainedConfig pydantic regression. Distinct bugs.", "right": "issue:45070"}, {"accept": false, "left": "issue:39290", "reason": "Both mention sliding_window, but one is a missing config field in Gemma3 and the other is a Qwen3-MoE sliding_window behavior issue. Not enough evidence of one shared bug.", "right": "issue:43531"}, {"accept": false, "left": "issue:41093", "reason": "Both are IndexErrors, but the shapes/modes differ and the second is specifically ZeRO-3 with BertModel. Too generic to merge.", "right": "issue:43638"}, {"accept": false, "left": "issue:43792", "reason": "Whisper loading failure and PretrainedConfig/pydantic breakage are unrelated.", "right": "issue:45070"}, {"accept": false, "left": "issue:43404", "reason": "Mistral3 lm_head tying bug is unrelated to an Ovis2 checkpoint corruption report.", "right": "issue:43866"}], "summary": "The cluster is highly heterogeneous: it mixes unrelated model-loading bugs, documentation issues, version-compatibility reports, and a few superficially similar IndexError/sliding_window tickets. 
None of the soft pairs look like safe duplicate merges."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:41762|issue:43596", "issue:43901|issue:44512", "issue:39290|issue:41720", "issue:43039|issue:44560", "issue:43866|issue:44863", "issue:43854|issue:43866", "issue:43425|issue:45070", "issue:39290|issue:43531", "issue:41093|issue:43638", "issue:43792|issue:45070", "issue:43404|issue:43866"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7120, "estimated_input_tokens": 3432, "item_count": 18, "node_count": 18, "serialized_chars": 13725, "soft_pair_count": 11}, "cached_at": "2026-04-14T20:12:02Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "4a75bbb90be924183a00452c341945b56ead68c0eed03dbf819324fc0306cdc3", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:36010", "issue:39290", "issue:41093", "issue:41762", "issue:43296", "issue:43366", "issue:43531", "issue:43541", "issue:43572", "issue:43828", "issue:44291", "issue:44387", "issue:44589", "issue:44841", "issue:45005", "issue:45070", "issue:45084", "issue:45161"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.93, "soft_edge_verdicts": [{"accept": false, "left": "issue:43541", "reason": "Both involve MoE/runtime errors, but one is a grouped_mm tracing failure in MixtralForCausalLM and the other is an autocast dtype mismatch in Phi-tiny-MoE; different models and code paths.", "right": "issue:43828"}, {"accept": false, "left": "issue:36010", "reason": "These are distinct regressions: one is an import/export failure 
for GenerationMixin, the other is a Gemma3TextConfig attribute missing during vLLM loading.", "right": "issue:39290"}, {"accept": false, "left": "issue:44841", "reason": "Voxtral processor failure and a generic 'Can't compile non template nodes' error are unrelated symptoms with no shared code path or fix target.", "right": "issue:45084"}, {"accept": false, "left": "issue:41093", "reason": "Both are IndexError reports, but one is a mask/tensor length mismatch and the other is an empty-tensor index error when loading Gemma3 with DeepSpeed ZeRO-3; not the same bug.", "right": "issue:41762"}, {"accept": false, "left": "issue:43296", "reason": "PaddleOCR-VL loading in vLLM is a concrete model-integration issue, while gpt-oss GGUF support is a feature request/compatibility topic; not duplicates.", "right": "issue:43366"}, {"accept": false, "left": "issue:43572", "reason": "StableLmConfig missing pad_token_idx after 5.0 and a PretrainedConfig pydantic-field regression are both config issues, but they affect different fields and failure modes.", "right": "issue:45070"}, {"accept": false, "left": "issue:44387", "reason": "Int4 quantization OOM from CUDA reserved memory is unrelated to tied-weights behavior in translation models; different subsystems and fixes.", "right": "issue:45005"}, {"accept": false, "left": "issue:43541", "reason": "Both mention MoE/GPT-OSS-ish model loading concerns, but one is a torch dynamo grouped_mm tracing runtime error and the other is tensor-parallelism not working; not the same underlying defect.", "right": "issue:45161"}, {"accept": false, "left": "issue:44291", "reason": "The init_empty_weights TypeError from an unexpected _is_hf_initialized argument is a constructor/signature compatibility bug, not the same as the pydantic PretrainedConfig field regression.", "right": "issue:45070"}, {"accept": false, "left": "issue:43531", "reason": "A sliding_window bug in Qwen3-MoE models is unrelated to a PretrainedConfig pydantic model-field breakage; 
they do not share the same code path.", "right": "issue:45070"}, {"accept": false, "left": "issue:43541", "reason": "Grouped_mm tracing in MixtralForCausalLM and missing Float8_e4m3fnStorage are separate runtime/backend issues with different triggers and fixes.", "right": "issue:44589"}], "summary": "This cluster is heterogeneous: most items are unrelated bug reports spanning config/schema regressions, model-specific loading failures, runtime shape errors, and quantization/autocast issues. The soft-similarity pairs only share broad surface language, not the same concrete bug or fix path, so none should be merged as duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43541|issue:43828", "issue:36010|issue:39290", "issue:44841|issue:45084", "issue:41093|issue:41762", "issue:43296|issue:43366", "issue:43572|issue:45070", "issue:44387|issue:45005", "issue:43541|issue:45161", "issue:44291|issue:45070", "issue:43531|issue:45070", "issue:43541|issue:44589"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6952, "estimated_input_tokens": 3348, "item_count": 18, "node_count": 18, "serialized_chars": 13391, "soft_pair_count": 10}, "cached_at": "2026-04-14T20:12:25Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "5b04d8441024d02b1d87df35a681bf3d1be76ef96f8169b209025cbecd35fa6e", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:39186", "issue:39290", "issue:40990", "issue:41093", "issue:42915", "issue:43054", "issue:43404", "issue:43541", "issue:43596", "issue:43645", "issue:43742", "issue:43994", "issue:44530", "issue:44863", "issue:44918", "issue:44960", "issue:45005", 
"issue:45399"], "result": {"analyst_result": {"best_issue_reason": "issue:45005 best fits as the cluster representative because it is general, current, and describes a reusable failure mode rather than a one-off model-specific symptom.", "best_pr_reason": null, "canonical_issue_reason": "issue:45005 is the broadest open report here and the most umbrella-like regression theme ('tied weights in v5'), so it is the best representative of the set even though most other items are unrelated.", "canonical_pr_reason": null, "confidence": 0.76, "soft_edge_verdicts": [{"accept": false, "left": "issue:39290", "reason": "Different failure modes and model families: Gemma3/vLLM attribute error vs MobileLLM load key error. Similarity is only superficial.", "right": "issue:43742"}, {"accept": false, "left": "issue:43054", "reason": "Both mention SigLIP2, but one is about poor text embeddings and the other about nonsensical AutoModel/pipeline output. Not enough evidence they are the same concrete bug.", "right": "issue:43994"}, {"accept": false, "left": "issue:41093", "reason": "Both are tensor/indexing-style errors, but the shapes, stack context, and setup differ; no clear shared underlying code path.", "right": "issue:43596"}, {"accept": false, "left": "issue:42915", "reason": "Different models and runtime contexts: Qwen3Moe FP8 loading vs Mixtral grouped_mm failure during Dynamo tracing.", "right": "issue:43541"}, {"accept": false, "left": "issue:40990", "reason": "Completely different reported problems and model families; only broad 'model quality' similarity.", "right": "issue:44960"}, {"accept": false, "left": "issue:39186", "reason": "FSDP weight-shape runtime error vs TRL SFT embedding unpacking failure are distinct symptoms and likely distinct fixes.", "right": "issue:44918"}, {"accept": false, "left": "issue:43404", "reason": "Both involve weights, but one is a specific Mistral3 lm_head tying bug and the other is a broad v5 translation-model tied-weights issue; not clearly 
the same code path.", "right": "issue:45005"}, {"accept": false, "left": "issue:43645", "reason": "Custom-model Jupyter initialization breakage is unrelated to the flash-attn fallback gating problem.", "right": "issue:45399"}, {"accept": false, "left": "issue:44530", "reason": "PagedAttentionCache linear_attention crash and tied-weights translation-model issues are unrelated subsystems and failure modes.", "right": "issue:45005"}, {"accept": false, "left": "issue:44863", "reason": "NemotronH checkpoint loading is a model implementation issue, not the same as v5 tied-weights behavior in translation models.", "right": "issue:45005"}], "summary": "This cluster is mostly a loose collection of unrelated model/runtime bugs. Under a strict duplicate standard, none of the soft pairs are clear same-code-path duplicates; the SigLIP2 pair is the closest but still too vague to merge confidently."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:39290|issue:43742", "issue:43054|issue:43994", "issue:41093|issue:43596", "issue:42915|issue:43541", "issue:40990|issue:44960", "issue:39186|issue:44918", "issue:43404|issue:45005", "issue:43645|issue:45399", "issue:44530|issue:45005", "issue:44863|issue:45005"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6876, "estimated_input_tokens": 3310, "item_count": 18, "node_count": 18, "serialized_chars": 13240, "soft_pair_count": 10}, "cached_at": "2026-04-14T20:12:36Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "fbd53343bdf9a432ff8dc41272859ad3e6b2fecc35e7ad1b4f63ed9f9bf92911", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:39692", 
"issue:41553", "issue:42915", "issue:43054", "issue:43644", "issue:43828", "issue:43873", "issue:43883", "issue:43950", "issue:43975", "issue:44292", "issue:44360", "issue:44492", "issue:44512", "issue:44534", "issue:44928", "issue:45005", "issue:45399"], "result": {"analyst_result": {"best_issue_reason": "43950 is the best representative issue because it precisely states the regression and likely serves as the cleanest canonical target for the duplicate buffer-corruption reports.", "best_pr_reason": null, "canonical_issue_reason": "Issue 43950 is the clearest and most specific report of the non-persistent buffer corruption regression, with the strongest framing of the underlying bug and impact.", "canonical_pr_reason": null, "confidence": 0.84, "soft_edge_verdicts": [{"accept": false, "left": "issue:43873", "reason": "Both touch quantization/tied-weights behavior, but they describe different failures: offloading with quantization vs translation models with tied weights. Not the same bug.", "right": "issue:45005"}, {"accept": false, "left": "issue:42915", "reason": "Qwen3Moe FP8 failure and flash-attn fallback blocking are unrelated code paths and symptoms.", "right": "issue:45399"}, {"accept": false, "left": "issue:44360", "reason": "A DSA indexer ReLU discussion and a docs typo about a removed command are not the same underlying issue.", "right": "issue:44512"}, {"accept": false, "left": "issue:43975", "reason": "Incorrect detokenization for one model and an error running a different quantized Qwen model are distinct bugs.", "right": "issue:44292"}, {"accept": false, "left": "issue:44492", "reason": "A documentation typo and outdated docs mentioning a removed command are separate documentation issues, not duplicates.", "right": "issue:44512"}, {"accept": true, "left": "issue:43950", "reason": "Both report the same transformers v5 regression where non-persistent buffers are mishandled/corrupted during loading.", "right": "issue:44534"}, {"accept": true, "left": 
"issue:43644", "reason": "Same underlying bug: v5 fills or corrupts non-persistent buffers registered with persistent=False.", "right": "issue:43950"}, {"accept": false, "left": "issue:43828", "reason": "Different model/runtime failures: autocast dtype mismatch versus RLHF NaN explosion from 3D position_ids and SDPA fallback.", "right": "issue:44928"}, {"accept": false, "left": "issue:39692", "reason": "SigLIP2 doc example errors and worse text embeddings are both SigLIP2-related, but they are different problems.", "right": "issue:43054"}, {"accept": false, "left": "issue:41553", "reason": "Bad AutoTokenizer error handling for Voxtral and a missing attribute on MolmoForCausalLM are unrelated.", "right": "issue:43883"}], "summary": "Mostly a heterogeneous set of unrelated issues, with one clear duplicate subgroup around transformers v5 corrupting non-persistent buffers. I would merge 43644 and 44534 into 43950; the other soft pairs look semantically similar only at the subsystem/topic level, not the same concrete bug."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43873|issue:45005", "issue:42915|issue:45399", "issue:44360|issue:44512", "issue:43975|issue:44292", "issue:44492|issue:44512", "issue:43950|issue:44534", "issue:43644|issue:43950", "issue:43828|issue:44928", "issue:39692|issue:43054", "issue:41553|issue:43883"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7068, "estimated_input_tokens": 3406, "item_count": 18, "node_count": 18, "serialized_chars": 13621, "soft_pair_count": 11}, "cached_at": "2026-04-14T20:12:55Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": 
"f1252b40796bc4f0c7c3ef908828d34931a77c7e33ac3a5c45e5f3ee3dfa15a3", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:36331", "issue:38175", "issue:41553", "issue:41762", "issue:42947", "issue:43054", "issue:43493", "issue:43643", "issue:43856", "issue:43866", "issue:43872", "issue:43881", "issue:43883", "issue:44387", "issue:44403", "issue:44451", "issue:44589", "issue:45020"], "result": {"analyst_result": {"best_issue_reason": "issue:43493 has the clearest root-cause framing and is the strongest candidate for the SigLIP2-related problem in this set.", "best_pr_reason": null, "canonical_issue_reason": "issue:43493 is the best representative of the only likely duplicate pair because it states the underlying SigLIP2 HF-vs-JAX implementation discrepancy more directly; most other items are unrelated and should not be merged into a single canonical bug.", "canonical_pr_reason": null, "confidence": 0.87, "soft_edge_verdicts": [{"accept": false, "left": "issue:43866", "reason": "Different problems: corrupted checkpoint vs noisy load logging.", "right": "issue:44403"}, {"accept": false, "left": "issue:36331", "reason": "Trainer API TypeError in compute_loss is unrelated to Gemma3 ZeRO-3 loading IndexError.", "right": "issue:41762"}, {"accept": false, "left": "issue:43872", "reason": "Both involve dependency/quantization incompatibilities, but the concrete failures differ (_is_hf_initialized kwarg vs missing Float8 storage).", "right": "issue:44589"}, {"accept": false, "left": "issue:41553", "reason": "Different models and failure modes: bad AutoTokenizer error message for Voxtral vs generic loading failure for glm-4v-9b.", "right": "issue:43881"}, {"accept": false, "left": "issue:38175", "reason": "Both concern SigLIP2 behavior, but one reports zero probabilities and the other degraded embeddings; not clearly the same bug.", "right": "issue:43054"}, {"accept": false, "left": "issue:42947", "reason": "Both are memory-related, but LoRA gradient checkpointing 
and int4 reserved-memory OOM are distinct code paths.", "right": "issue:44387"}, {"accept": false, "left": "issue:43881", "reason": "Different model-load failures with different underlying errors; not the same bug.", "right": "issue:43883"}, {"accept": true, "left": "issue:43054", "reason": "Likely the same SigLIP2 implementation discrepancy: HF text embeddings/performance diverge from the original JAX implementation.", "right": "issue:43493"}, {"accept": false, "left": "issue:44403", "reason": "Unnecessary loading noise is not the same as a model that cannot be loaded.", "right": "issue:44451"}, {"accept": false, "left": "issue:42947", "reason": "Both are training/memory complaints, but they involve different workloads and mechanisms.", "right": "issue:43856"}, {"accept": false, "left": "issue:43643", "reason": "Remote-code regressions are broad; missing fields in AutoConfig is too specific to merge with the generic recent-version breakage report without stronger evidence.", "right": "issue:45020"}], "summary": "This cluster is mostly heterogeneous: the reported issues span unrelated models, loading failures, training/memory problems, and dependency incompatibilities. 
Only the SigLIP2 pair looks like a plausible duplicate; the rest should stay separate."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43866|issue:44403", "issue:36331|issue:41762", "issue:43872|issue:44589", "issue:41553|issue:43881", "issue:38175|issue:43054", "issue:42947|issue:44387", "issue:43881|issue:43883", "issue:43054|issue:43493", "issue:44403|issue:44451", "issue:42947|issue:43856", "issue:43643|issue:45020"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6694, "estimated_input_tokens": 3219, "item_count": 17, "node_count": 17, "serialized_chars": 12875, "soft_pair_count": 10}, "cached_at": "2026-04-14T20:13:10Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "33a882d5cfb9d8fb62b09dccc3bf40fc448cc0b5a4fde435ca4f00760e6213bc", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:41762", "issue:42491", "issue:42915", "issue:43054", "issue:43278", "issue:43782", "issue:43824", "issue:43872", "issue:43975", "issue:44368", "issue:44403", "issue:44451", "issue:44488", "issue:44661", "issue:44960", "issue:45020", "issue:45356"], "result": {"analyst_result": {"best_issue_reason": "issue:45020 is the best single issue to anchor the cluster because it captures the widest compatibility/regression surface among the set. It is still not a true umbrella for many of the other reports, but it is the closest fit.", "best_pr_reason": null, "canonical_issue_reason": "issue:45020 is the broadest representative of the shared theme: recent transformers changes breaking model loading/remote-code paths. 
The other issues are mostly narrower, model-specific failures or unrelated tokenizer/embedding/quantization problems.", "canonical_pr_reason": null, "confidence": 0.91, "soft_edge_verdicts": [{"accept": false, "left": "issue:44403", "reason": "Both involve loading-related behavior, but one is about noisy warnings while the other is a specific model load failure for sleng-bert; not the same bug.", "right": "issue:44488"}, {"accept": false, "left": "issue:43782", "reason": "Different models and different failure modes: Qwen3VL weight_only loading vs GLM5, with no clear shared code-path bug.", "right": "issue:44960"}, {"accept": false, "left": "issue:42491", "reason": "Both mention Qwen/LoRA, but one is a cross-version model usability problem and the other is a training-time warning about tie_word_embeddings; not duplicate enough.", "right": "issue:44368"}, {"accept": false, "left": "issue:41762", "reason": "Separate model-specific failures in different execution modes: Gemma3 ZeRO-3 loading vs Qwen3Moe with FineGrainedFP8Config.", "right": "issue:42915"}, {"accept": false, "left": "issue:43975", "reason": "Tokenizer detokenization corruption and model load failure are unrelated bugs.", "right": "issue:44451"}, {"accept": false, "left": "issue:45020", "reason": "Both are regressions, but one is about remote_code model loading and the other is tokenizer codec/warning behavior for Kimi-K2.5.", "right": "issue:45356"}, {"accept": false, "left": "issue:44661", "reason": "Both touch model registration/loading internals, but the reported failures are different concrete bugs and would not plausibly merge into one PR.", "right": "issue:45020"}, {"accept": false, "left": "issue:43824", "reason": "ImportError for a missing Qwen VL class and a bitsandbytes Int8Params constructor mismatch are distinct incompatibility issues.", "right": "issue:43872"}, {"accept": false, "left": "issue:43975", "reason": "DeepSeek detokenization and sleng-bert loading are unrelated.", "right": 
"issue:44488"}, {"accept": false, "left": "issue:43054", "reason": "One is a SigLIP2 text-embedding quality complaint; the other is an embedding dtype mismatch between train and eval. Related area, but not the same bug.", "right": "issue:43278"}], "summary": "The cluster is loosely related by model-loading/model-compatibility regressions, but the items span many distinct bugs: Qwen/Gemma/DeepSeek/VL loading, tokenizer regressions, dtype/quantization issues, and embedding-quality complaints. None of the soft pairs look like true duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44403|issue:44488", "issue:43782|issue:44960", "issue:42491|issue:44368", "issue:41762|issue:42915", "issue:43975|issue:44451", "issue:45020|issue:45356", "issue:44661|issue:45020", "issue:43824|issue:43872", "issue:43975|issue:44488", "issue:43054|issue:43278"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6882, "estimated_input_tokens": 3313, "item_count": 17, "node_count": 17, "serialized_chars": 13251, "soft_pair_count": 11}, "cached_at": "2026-04-14T20:13:31Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "24e09a81d37ee0d1820cea9ced4721e62a9c256bef9b7cb78b1291798691dd03", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:41553", "issue:42915", "issue:43257", "issue:43404", "issue:43742", "issue:43792", "issue:43827", "issue:43866", "issue:43872", "issue:43950", "issue:44220", "issue:44291", "issue:44509", "issue:44991", "issue:45020", "issue:45081", "issue:45399"], "result": {"analyst_result": {"best_issue_reason": "44291 is the strongest representative issue in the cluster because it 
names the concrete failing code path and root cause most directly.", "best_pr_reason": null, "canonical_issue_reason": "Issue 44291 is the clearest and most general statement of the shared `_is_hf_initialized` regression; 43872 looks like a downstream bitsandbytes manifestation of the same loading-path bug.", "canonical_pr_reason": null, "confidence": 0.81, "soft_edge_verdicts": [{"accept": false, "left": "issue:44991", "reason": "Both are recent Transformers regressions, but one is tokenizer loading for a specific model and the other is flash-attn fallback gating; different code paths and fixes.", "right": "issue:45399"}, {"accept": false, "left": "issue:43792", "reason": "Both are audio/speech-related, but the titles suggest a broad model failure versus a specific `_torch_extract_fbank_features()` bug; not enough to treat as the same underlying defect.", "right": "issue:44220"}, {"accept": true, "left": "issue:43827", "reason": "Both report the same docs regression: summarization/translation pipeline task names still mentioned after v5 removed those pipelines.", "right": "issue:44509"}, {"accept": false, "left": "issue:45081", "reason": "Mistral tokenizer regex crash and flash-attn fallback blocking are unrelated failures in different parts of the loading stack.", "right": "issue:45399"}, {"accept": false, "left": "issue:43404", "reason": "One is a tied-weights bug in a specific multimodal model class; the other is a generic from_pretrained buffer regression. 
Different symptoms and likely different fixes.", "right": "issue:43950"}, {"accept": false, "left": "issue:42915", "reason": "Qwen3MoE FP8 failure and remote_code breakage are both broad loading issues, but they are not the same model/pathology.", "right": "issue:45020"}, {"accept": false, "left": "issue:43257", "reason": "These concern different models and different failure modes: weight conversion under accelerate+deepspeed versus a corrupted checkpoint.", "right": "issue:43866"}, {"accept": false, "left": "issue:41553", "reason": "Both are tokenizer-related, but they target different models and different failure messages; no evidence they share the same bug.", "right": "issue:44991"}, {"accept": false, "left": "issue:43742", "reason": "A KeyError for MobileLLM-125M is not the same concrete issue as the broader remote_code regression.", "right": "issue:45020"}, {"accept": true, "left": "issue:43872", "reason": "Both mention the same unexpected `_is_hf_initialized` argument during model loading; 43872 frames it through bitsandbytes Int8Params, while 44291 shows the same underlying regression via init_empty_weights.", "right": "issue:44291"}], "summary": "This cluster is mostly a mix of unrelated Transformers bug reports. I found two genuine duplicate-like pairs: the v5 docs issues about removed pipeline tasks, and the `_is_hf_initialized` loading regression seen via both bitsandbytes and init_empty_weights. 
The rest are only loosely similar by subsystem (tokenizers, audio, MoE, remote_code) and should stay separate."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44991|issue:45399", "issue:43792|issue:44220", "issue:43827|issue:44509", "issue:45081|issue:45399", "issue:43404|issue:43950", "issue:42915|issue:45020", "issue:43257|issue:43866", "issue:41553|issue:44991", "issue:43742|issue:45020", "issue:43742|issue:45399", "issue:43872|issue:44291"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6710, "estimated_input_tokens": 3227, "item_count": 17, "node_count": 17, "serialized_chars": 12907, "soft_pair_count": 10}, "cached_at": "2026-04-14T20:13:51Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "4cbe5f71964cb597b72460a010d2c8809323538b918225cc605c5157b991bcaf", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:36331", "issue:41093", "issue:41553", "issue:42915", "issue:43257", "issue:43854", "issue:43872", "issue:43940", "issue:44479", "issue:44488", "issue:44530", "issue:44560", "issue:44805", "issue:44863", "issue:45084", "issue:45305", "issue:45313"], "result": {"analyst_result": {"best_issue_reason": "issue:45313 is the closest representative only because it is a concrete, current Qwen/DeepSpeed loading report, but it does not subsume the rest of the cluster.", "best_pr_reason": null, "canonical_issue_reason": "No clear canonical issue: the items describe different underlying bugs rather than one shared defect.", "canonical_pr_reason": null, "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "issue:43940", "reason": "Both involve Qwen + DeepSpeed 
ZeRO-3 loading, but the model families and failure modes differ ('all params MISSING' vs 'language_model' load failure). Too broad to treat as the same bug.", "right": "issue:45313"}, {"accept": false, "left": "issue:42915", "reason": "Different quantization paths and errors: FineGrainedFP8Config failure vs bitsandbytes Int8Params kwarg incompatibility. Not the same underlying issue.", "right": "issue:43872"}, {"accept": false, "left": "issue:41553", "reason": "Both are loading-related, but one is a bad AutoTokenizer error for Voxtral and the other is a failure to load cjvt/sleng-bert. Different models and fix scope.", "right": "issue:44488"}, {"accept": false, "left": "issue:43854", "reason": "Both concern model loading, but they are for different architectures/checkpoints (GLM-4.7-Flash vs NemotronH) and likely distinct code paths.", "right": "issue:44863"}, {"accept": false, "left": "issue:36331", "reason": "A custom trainer kwarg regression and a tensor mask shape mismatch are unrelated bugs with different symptoms and code paths.", "right": "issue:41093"}, {"accept": false, "left": "issue:43257", "reason": "Both touch DeepSpeed/Qwen training, but one is about weight conversion during load and the other about gradient averaging with GAS. Not the same concrete bug.", "right": "issue:45305"}, {"accept": false, "left": "issue:41093", "reason": "Both are shape-mismatch IndexErrors, but there is no evidence they arise from the same code path or same fix.", "right": "issue:44805"}, {"accept": false, "left": "issue:44479", "reason": "Both are Qwen video regressions in 5.3.0, but one is a broad regression across several models and the other is a StopIteration in qwen3-vl-embedding. 
Too specific to merge.", "right": "issue:44560"}, {"accept": false, "left": "issue:44530", "reason": "PagedAttentionCache/linear_attention crash and NemotronH checkpoint loading are unrelated issues.", "right": "issue:44863"}, {"accept": false, "left": "issue:43872", "reason": "bitsandbytes Int8Params constructor incompatibility and 'Can't compile non template nodes' are different failures with no shared underlying bug.", "right": "issue:45084"}], "summary": "This cluster is heterogeneous: the items span unrelated bugs in trainer APIs, tensor shape mismatches, model/tokenizer loading, DeepSpeed/quantization, and video handling. I would not merge any of the soft pairs as duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43940|issue:45313", "issue:42915|issue:43872", "issue:41553|issue:44488", "issue:43854|issue:44863", "issue:36331|issue:41093", "issue:43257|issue:45305", "issue:41093|issue:44805", "issue:44479|issue:44560", "issue:44530|issue:44863", "issue:43872|issue:45084"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7094, "estimated_input_tokens": 3419, "item_count": 18, "node_count": 18, "serialized_chars": 13676, "soft_pair_count": 11}, "cached_at": "2026-04-14T20:14:06Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "aa9dc8924d21c81ff6dfb02a2298814fb5f9a7aecf8f7de7d55a2db71a5475d4", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:24643", "issue:30064", "issue:42617", "issue:42915", "issue:43366", "issue:43531", "issue:43550", "issue:43645", "issue:43646", "issue:43824", "issue:43828", "issue:43950", "issue:43957", "issue:44560", "issue:44589", 
"issue:44910", "issue:44918", "issue:45325"], "result": {"analyst_result": {"best_issue_reason": "Issue 44910 is the most technically specific and actionable report in the set, with a concrete root-cause statement (3D position_ids misread as packed sequence) and a well-defined failure mode.", "best_pr_reason": null, "canonical_issue_reason": "Issue 43645 is the stronger representative of the only true duplicate pair: it describes the same custom-model initialization regression as 43646, with the added Jupyter notebook context and clearer reproduction scope.", "canonical_pr_reason": null, "confidence": 0.88, "soft_edge_verdicts": [{"accept": false, "left": "issue:43366", "reason": "Different bugs: GGUF/gpt-oss architecture support vs a Phi MoE dtype mismatch under autocast. No shared code path or concrete fix overlap.", "right": "issue:43828"}, {"accept": false, "left": "issue:24643", "reason": "Unrelated failures: DeepSpeed training RuntimeError about a 2-D weight tensor vs image processor handling of void segmentation maps.", "right": "issue:30064"}, {"accept": false, "left": "issue:43366", "reason": "Both mention Qwen/MoE-style models, but one is GGUF architecture support and the other is a sliding_window issue. The underlying bug and code path differ.", "right": "issue:43531"}, {"accept": false, "left": "issue:44560", "reason": "Both involve Qwen3.x, but one is a video StopIteration in Qwen3-vl-embedding and the other is TRL SFT unpacking embeddings. Different failure points and fixes.", "right": "issue:44918"}, {"accept": false, "left": "issue:43366", "reason": "Completely different symptoms: gpt-oss GGUF support vs missing Float8 storage lookup. 
No evidence of the same root cause.", "right": "issue:44589"}, {"accept": false, "left": "issue:43950", "reason": "Loading/regression around non-persistent buffers is a different issue from Qwen2.5-VL rope index scaling for still-image temporal position_ids.", "right": "issue:45325"}, {"accept": true, "left": "issue:43645", "reason": "These are the same regression: Transformers 5.0 breaking custom model initialization. The notebook/Jupyter detail in 43645 is just an environment-specific manifestation of the same bug.", "right": "issue:43646"}, {"accept": false, "left": "issue:43550", "reason": "Different concrete problems and model paths: torch.compile/SDPA on Bamba-9B-v2 vs Flash Attention illegal memory access on Qwen3.5 due to position_ids handling.", "right": "issue:44910"}, {"accept": false, "left": "issue:43824", "reason": "Import error for a missing Qwen2.5-VL class is not the same as meta-device model loading failures. Different symptom and likely different fix.", "right": "issue:43957"}, {"accept": false, "left": "issue:42915", "reason": "Qwen3MoE FineGrainedFP8Config failure and meta-device loading failures are distinct model/runtime regressions, not the same underlying bug.", "right": "issue:43957"}, {"accept": false, "left": "issue:42617", "reason": "3d_parallel.py execution failure is unrelated to GGUF gpt-oss architecture support.", "right": "issue:43366"}], "summary": "Mostly unrelated issue reports across different models/features; only the two Transformers 5.0 custom model initialization reports appear to be the same underlying regression. 
All other soft pairs are false positives and should stay separate."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43366|issue:43828", "issue:24643|issue:30064", "issue:43366|issue:43531", "issue:44560|issue:44918", "issue:43366|issue:44589", "issue:43950|issue:45325", "issue:43645|issue:43646", "issue:43550|issue:44910", "issue:43824|issue:43957", "issue:42915|issue:43957", "issue:42617|issue:43366"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6980, "estimated_input_tokens": 3362, "item_count": 18, "node_count": 18, "serialized_chars": 13446, "soft_pair_count": 10}, "cached_at": "2026-04-14T20:14:21Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "3e0c474748f8008a37cc009c00ed32c8a23922fcbf579700cbfcf69fee5fa1f1", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:36010", "issue:38175", "issue:39692", "issue:42915", "issue:43317", "issue:43596", "issue:43638", "issue:43644", "issue:43646", "issue:43950", "issue:43994", "issue:44534", "issue:44560", "issue:44661", "issue:44756", "issue:44805", "issue:44877", "issue:45030"], "result": {"analyst_result": {"best_issue_reason": "As a representative issue in this mixed cluster, 43950 is the most complete and actionable bug report, and it anchors one of the strongest duplicate subgroups in the set.", "best_pr_reason": null, "canonical_issue_reason": "Issue 43950 is the clearest canonical for the non-persistent-buffer regression subgroup: it states the root symptom explicitly, frames it as a Transformers 5.x regression, and matches the broader buffer-corruption reports better than the shorter variants.", "canonical_pr_reason": 
null, "confidence": 0.87, "soft_edge_verdicts": [{"accept": true, "left": "issue:38175", "reason": "Same SigLIP2 base model and same failure class: incorrect/degenerate outputs from the model/pipeline path.", "right": "issue:43994"}, {"accept": false, "left": "issue:44560", "reason": "Different concrete bugs: Qwen3-vl video StopIteration vs a mask/tensor shape mismatch.", "right": "issue:44805"}, {"accept": true, "left": "issue:43646", "reason": "Same underlying Transformers 5.x non-persistent-buffer corruption during model initialization/from_pretrained.", "right": "issue:43950"}, {"accept": true, "left": "issue:44877", "reason": "Both report stricter config validation rejecting otherwise valid model configs.", "right": "issue:45030"}, {"accept": false, "left": "issue:36010", "reason": "Unrelated failures: GenerationMixin import error vs Qwen3Moe FP8 config loading failure.", "right": "issue:42915"}, {"accept": false, "left": "issue:43638", "reason": "Different code paths and symptoms: DeepSpeed ZeRO3/Bert init index error vs tokenizer-mapping constraint in add-new-model-like.", "right": "issue:44661"}, {"accept": false, "left": "issue:43596", "reason": "BertModel/ZeRO3 index error is unrelated to the tokenizer-mapping issue in add-new-model-like.", "right": "issue:44661"}, {"accept": false, "left": "issue:43317", "reason": "Different underlying problems: dequantized-model offload loading failure vs mmap-related OOM on Strix Halo.", "right": "issue:44756"}, {"accept": true, "left": "issue:43644", "reason": "Both describe the same non-persistent-buffer junk/corruption regression in Transformers 5.x.", "right": "issue:44534"}, {"accept": false, "left": "issue:38175", "reason": "Both involve SigLIP2, but one is bad outputs while the other is a doc-example issue with model/processor mismatch and quantization failure.", "right": "issue:39692"}], "summary": "This cluster is actually several smaller duplicate groups mixed together: SigLIP2 output issues, non-persistent 
buffer corruption in Transformers 5.x, and config-validation regressions, plus multiple unrelated singleton bugs."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:38175|issue:43994", "issue:44560|issue:44805", "issue:43646|issue:43950", "issue:44877|issue:45030", "issue:36010|issue:42915", "issue:43638|issue:44661", "issue:43596|issue:44661", "issue:43317|issue:44756", "issue:43644|issue:44534", "issue:38175|issue:39692"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6434, "estimated_input_tokens": 3089, "item_count": 17, "node_count": 17, "serialized_chars": 12356, "soft_pair_count": 9}, "cached_at": "2026-04-14T20:14:55Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "da5f69cf3c5475a3e200fb59c842f0270f585e47b509610b8ab5f96ce680400c", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:39692", "issue:41720", "issue:42915", "issue:43708", "issue:43856", "issue:43873", "issue:43994", "issue:44075", "issue:44451", "issue:44488", "issue:44960", "issue:44977", "issue:44991", "issue:45313", "issue:45357", "issue:45362", "issue:45406"], "result": {"analyst_result": {"best_issue_reason": "Issue 44488 best captures the underlying bug because it has the clearest cross-reference signal and the broadest description of the load failure affecting multiple models.", "best_pr_reason": null, "canonical_issue_reason": "Issue 44488 is the better representative for the shared load-failure regression: it\u2019s the later, more referenced report and covers the same failure mode seen in 44451 with another model.", "canonical_pr_reason": null, "confidence": 0.67, "soft_edge_verdicts": [{"accept": 
false, "left": "issue:45313", "reason": "Same model family, but different concrete bugs/code paths: ZeRO-3 weight loading vs save_pretrained visual-encoder key regression.", "right": "issue:45357"}, {"accept": false, "left": "issue:45362", "reason": "Different failures and components: Qwen3.5 chat crash versus Gemma4Processor missing _tokenizer in serve.", "right": "issue:45406"}, {"accept": false, "left": "issue:44960", "reason": "Different models and symptoms; this is not the same underlying bug.", "right": "issue:44977"}, {"accept": true, "left": "issue:44451", "reason": "Both report the same model-loading/tokenizer-loading regression on different BERT models, likely the same underlying bug.", "right": "issue:44488"}, {"accept": false, "left": "issue:43708", "reason": "Trainer checkpoint resume max_steps math is unrelated to SGD optimizer arguments not being used.", "right": "issue:44075"}, {"accept": false, "left": "issue:44488", "reason": "Both are loading-related, but one is a model-loading regression and the other is a tokenizer-loading failure for a different model; not the same concrete bug.", "right": "issue:44991"}, {"accept": false, "left": "issue:43856", "reason": "Both mention memory/offloading, but one is MoE training memory usage and the other is quantization offloading behavior.", "right": "issue:43873"}, {"accept": false, "left": "issue:41720", "reason": "Different Qwen3 variants and different failure modes: cudaErrorAssert on device mapping vs FineGrainedFP8Config failure.", "right": "issue:42915"}, {"accept": false, "left": "issue:39692", "reason": "Both involve SigLIP2, but one is a docs/model-processor mismatch plus quantization failure, while the other is bad outputs with AutoModel/pipeline.", "right": "issue:43994"}], "summary": "Most candidates are distinct issues. The only plausible duplicate pair is the two model-loading failures for different BERT repos (44451/44488), which look like the same tokenizer/model-loading regression. 
The rest differ by model, code path, or symptom."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:45313|issue:45357", "issue:45362|issue:45406", "issue:44960|issue:44977", "issue:44451|issue:44488", "issue:43708|issue:44075", "issue:44488|issue:44991", "issue:43856|issue:43873", "issue:41720|issue:42915", "issue:39692|issue:43994"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6964, "estimated_input_tokens": 3354, "item_count": 17, "node_count": 17, "serialized_chars": 13416, "soft_pair_count": 12}, "cached_at": "2026-04-14T20:15:22Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "7f9e73ed2a2fcc1b545b26f28b522c03a85465e1337c434376d00be476ebea58", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:42491", "issue:42915", "issue:43257", "issue:43404", "issue:43454", "issue:43644", "issue:43645", "issue:43720", "issue:43854", "issue:43856", "issue:43931", "issue:44387", "issue:44661", "issue:44683", "issue:44756", "issue:45276", "issue:45335"], "result": {"analyst_result": {"best_issue_reason": "issue:45276 is the best global issue representative: it is open, central to the only accepted near-duplicate pair, and describes the underlying API regression succinctly.", "best_pr_reason": null, "canonical_issue_reason": "issue:45276 is the strongest issue candidate because it has the clearest duplicate signal and reads as the broader Gemma embedding-resize bug, with issue:45335 as a close sibling report.", "canonical_pr_reason": null, "confidence": 0.67, "soft_edge_verdicts": [{"accept": false, "left": "issue:43854", "reason": "Both are model-loading failures, but for different 
models and failure modes; not the same underlying bug.", "right": "issue:43931"}, {"accept": false, "left": "issue:44661", "reason": "Different subsystems: tokenizer mapping generation vs compiled flex_attention on torch >= 2.9.", "right": "issue:44683"}, {"accept": false, "left": "issue:42915", "reason": "One is Qwen3MoE FP8 behavior, the other is Strix Halo mmap memory handling; no shared bug path.", "right": "issue:44756"}, {"accept": false, "left": "issue:42491", "reason": "Different APIs and model families: Qwen3 MoE LoRA compatibility vs Gemma token-embedding resizing.", "right": "issue:45276"}, {"accept": false, "left": "issue:42491", "reason": "Different model families and loading issues; one is Qwen3 MoE LoRA compatibility, the other BitNet packed-weight loading.", "right": "issue:43720"}, {"accept": false, "left": "issue:43257", "reason": "Both involve loading, but they concern unrelated models and mechanisms (accelerate/deepspeed conversion vs GLM unit-test loading).", "right": "issue:43854"}, {"accept": false, "left": "issue:43856", "reason": "Both mention memory/OOM, but the contexts differ too much to be the same bug (Qwen3 MoE training vs int4 quantization memory reservation).", "right": "issue:44387"}, {"accept": false, "left": "issue:43644", "reason": "Same release window, but different problems: junk-filled non-persistent buffers vs Jupyter custom-model initialization.", "right": "issue:43645"}, {"accept": false, "left": "issue:42491", "reason": "Different model-specific loading bugs; no indication of the same underlying failure.", "right": "issue:43931"}, {"accept": true, "left": "issue:45276", "reason": "Same core bug: resize_token_embeddings does not propagate to model embeddings in closely related Gemma/T5Gemma variants, so one fix could plausibly cover both.", "right": "issue:45335"}, {"accept": false, "left": "issue:43404", "reason": "Both are untied lm_head reports, but they affect different model classes (Mistral3 vs AyaVision) and likely 
need separate fixes.", "right": "issue:43454"}, {"accept": false, "left": "issue:42491", "reason": "Different Qwen3 MoE issues: LoRA compatibility vs training memory usage.", "right": "issue:43856"}], "summary": "Most items are unrelated model-specific bugs. The only clear near-duplicate is the Gemma resize_token_embeddings reports (45276/45335), which appear to describe the same missing propagation of resized embeddings across closely related model variants."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43854|issue:43931", "issue:44661|issue:44683", "issue:42915|issue:44756", "issue:42491|issue:45276", "issue:42491|issue:43720", "issue:43257|issue:43854", "issue:43856|issue:44387", "issue:43644|issue:43645", "issue:42491|issue:43931", "issue:45276|issue:45335", "issue:43404|issue:43454", "issue:42491|issue:43856"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6476, "estimated_input_tokens": 3110, "item_count": 17, "node_count": 17, "serialized_chars": 12437, "soft_pair_count": 9}, "cached_at": "2026-04-14T20:15:39Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "4f1830d0e29ffbc5051f59bd65578b04c01326b6c38b3412233b18cef3f6eb43", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:36010", "issue:39404", "issue:39692", "issue:41720", "issue:42947", "issue:43381", "issue:43425", "issue:43873", "issue:43957", "issue:44155", "issue:44164", "issue:44291", "issue:44387", "issue:44402", "issue:44488", "issue:44756", "issue:44960"], "result": {"analyst_result": {"best_issue_reason": "No global best issue is suitable as a canonical representative because the matched issues cover 
unrelated defects rather than one root cause.", "best_pr_reason": null, "canonical_issue_reason": "No single canonical issue fits the cluster: the candidate pairs are heterogeneous and do not share one concrete underlying bug.", "canonical_pr_reason": null, "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "issue:36010", "reason": "Import-path regression for GenerationMixin vs Whisper pipeline return_language behavior; different APIs and root causes.", "right": "issue:39404"}, {"accept": false, "left": "issue:44164", "reason": "save/from_pretrained extra_state handling and init_empty_weights unexpected kwarg are distinct loading/saving internals bugs.", "right": "issue:44291"}, {"accept": false, "left": "issue:44402", "reason": "Tokenizer vocab-size mismatch for one model is not the same as a separate model load failure for cjvt/sleng-bert.", "right": "issue:44488"}, {"accept": false, "left": "issue:41720", "reason": "Qwen3 auto device mapping cuda assert and AudioFlamingo3 batched embedding/token leakage are unrelated code paths.", "right": "issue:44155"}, {"accept": false, "left": "issue:43873", "reason": "Both concern quantization/memory, but one is offloading behavior and the other is increased reserved CUDA memory under int4; too different to merge.", "right": "issue:44387"}, {"accept": false, "left": "issue:39692", "reason": "SigLIP2 documentation example problems are unrelated to the GLM5 issue.", "right": "issue:44960"}, {"accept": false, "left": "issue:43425", "reason": "Torch 2.10 compatibility and torch.device('meta') loading regressions are separate compatibility/loading issues.", "right": "issue:43957"}, {"accept": false, "left": "issue:44387", "reason": "Int4 quantization reserved-memory OOM and Strix Halo mmap OOM are different failure causes and fixes.", "right": "issue:44756"}, {"accept": false, "left": "issue:42947", "reason": "Gradient checkpointing with PEFT LoRA not taking effect is distinct from checkpointing being unusable 
in eval mode.", "right": "issue:43381"}], "summary": "These soft pairs are mostly broad similarity matches, not true duplicates. They span different models, code paths, and failure modes, so none should be merged into a single canonical bug report."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:36010|issue:39404", "issue:44164|issue:44291", "issue:44402|issue:44488", "issue:41720|issue:44155", "issue:43873|issue:44387", "issue:39692|issue:44960", "issue:43425|issue:43957", "issue:44387|issue:44756", "issue:42947|issue:43381"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6972, "estimated_input_tokens": 3358, "item_count": 18, "node_count": 18, "serialized_chars": 13429, "soft_pair_count": 10}, "cached_at": "2026-04-14T20:15:57Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "9e9c222e22466c8df1ade37324ed7a6d190379751d3586abeaaf8004b0d1aaeb", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:43381", "issue:43646", "issue:43708", "issue:43854", "issue:43957", "issue:44164", "issue:44291", "issue:44292", "issue:44479", "issue:44743", "issue:44756", "issue:44877", "issue:44912", "issue:44928", "issue:44960", "issue:45216", "issue:45276", "issue:45292"], "result": {"analyst_result": {"best_issue_reason": "45292 is the best representative because it describes the core API bug in the broadest, least model-specific terms and is the strongest duplicate anchor in this set.", "best_pr_reason": null, "canonical_issue_reason": "Issue 45292 is the clearest general statement of the `resize_token_embeddings` / `output_embeddings` regression; 45276 reads like a Gemma4-specific variant of 
the same underlying bug.", "canonical_pr_reason": null, "confidence": 0.87, "soft_edge_verdicts": [{"accept": false, "left": "issue:44292", "reason": "Both involve loading failures, but one is about `init_empty_weights` / `_is_hf_initialized` and the other is about strict config handling for `granite_speech`; different bugs.", "right": "issue:44912"}, {"accept": false, "left": "issue:43854", "reason": "Both mention model loading/checkpoint behavior, but the concrete failures differ: GLM-4.7-Flash test loading vs Qwen3.5 save_pretrained checkpoint corruption.", "right": "issue:45216"}, {"accept": false, "left": "issue:44291", "reason": "`init_empty_weights` constructor argument error and Strix Halo mmap OOM are unrelated code paths and fixes.", "right": "issue:44756"}, {"accept": true, "left": "issue:45276", "reason": "These describe the same underlying `resize_token_embeddings` bug: output embeddings are not updated; 45276 is just Gemma4-specific wording.", "right": "issue:45292"}, {"accept": false, "left": "issue:44743", "reason": "Qwen recurrent-state reset with cache is unrelated to the GLM5 issue; same broad family but not the same bug.", "right": "issue:44960"}, {"accept": false, "left": "issue:43957", "reason": "One is about `torch.device(\"meta\")` loading regressions, the other about mmap on Strix Halo; no shared concrete failure.", "right": "issue:44756"}, {"accept": false, "left": "issue:43381", "reason": "Gradient checkpointing eval-mode behavior and RLHF NaN/SDPA fallback are different failures with different root causes.", "right": "issue:44928"}, {"accept": false, "left": "issue:44164", "reason": "`save/from_pretrained` extra_state handling and mmap OOM are unrelated save/load vs memory-mapping problems.", "right": "issue:44756"}, {"accept": false, "left": "issue:43646", "reason": "Custom model initialization breakage and `resume_from_checkpoint` max_steps miscalculation are different trainer/model-init bugs.", "right": "issue:43708"}, {"accept": 
false, "left": "issue:44479", "reason": "Video input regression in Qwen-VL models and strict granite_speech config loading are separate regressions despite both being version-related.", "right": "issue:44877"}], "summary": "This cluster is mostly a grab bag of unrelated Transformers bug reports spanning model loading, config parsing, training resume logic, quantization, and a single clear duplicate pair around `resize_token_embeddings` / `output_embeddings`."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44292|issue:44912", "issue:43854|issue:45216", "issue:44291|issue:44756", "issue:45276|issue:45292", "issue:44743|issue:44960", "issue:43957|issue:44756", "issue:43381|issue:44928", "issue:44164|issue:44756", "issue:43646|issue:43708", "issue:44479|issue:44877"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6930, "estimated_input_tokens": 3337, "item_count": 18, "node_count": 18, "serialized_chars": 13348, "soft_pair_count": 10}, "cached_at": "2026-04-14T20:16:17Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "4ad147edb767cb6601af55d9eb4bb87c6c23995526de42c05ac2f10614aa118a", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:43257", "issue:43425", "issue:43645", "issue:43716", "issue:43828", "issue:43906", "issue:43957", "issue:44164", "issue:44265", "issue:44402", "issue:44451", "issue:44530", "issue:44589", "issue:44756", "issue:44863", "issue:44898", "issue:45292", "issue:45335"], "result": {"analyst_result": {"best_issue_reason": "issue:45292 is the best representative issue in this cluster: it is the broadest and clearest statement of the duplicate bug, and it can 
absorb the more specific t5gemma variant.", "best_pr_reason": null, "canonical_issue_reason": "issue:45292 is the more canonical report because it states the underlying regression in broader terms (resize_token_embeddings not updating output embeddings), while issue:45335 looks like a model-specific instance of the same problem for t5gemma's decoder.embed_tokens.", "canonical_pr_reason": null, "confidence": 0.82, "soft_edge_verdicts": [{"accept": true, "left": "issue:45292", "reason": "Same underlying bug: resize_token_embeddings updates one embedding path but not the tied/output embedding path; the second issue is just a model-specific framing of the same failure.", "right": "issue:45335"}, {"accept": false, "left": "issue:44402", "reason": "Both mention tokenizer/model loading problems, but they refer to different models and the reports do not clearly show the same concrete failure or code path.", "right": "issue:44451"}, {"accept": false, "left": "issue:43645", "reason": "One is about custom models in Jupyter notebooks, the other about save/from_pretrained and extra_state serialization; too different to treat as the same bug.", "right": "issue:44164"}, {"accept": false, "left": "issue:43425", "reason": "A generic Torch 2.10 incompatibility report is not the same as a Phi-tiny-MoE autocast dtype-mismatch bug.", "right": "issue:43828"}, {"accept": false, "left": "issue:44265", "reason": "torch.export with torch_compilable_check and Perceiver image classification resizing are unrelated failure modes.", "right": "issue:44898"}, {"accept": false, "left": "issue:43257", "reason": "Both are model-loading failures, but they concern different architectures and different checkpoint/load paths; not enough evidence of the same bug.", "right": "issue:44863"}, {"accept": false, "left": "issue:43828", "reason": "Autocast dtype mismatch and meta-device loading failures are distinct issues affecting different mechanisms.", "right": "issue:43957"}, {"accept": false, "left": 
"issue:44589", "reason": "Float8 storage deserialization and mmap OOM on Strix Halo are unrelated problems.", "right": "issue:44756"}, {"accept": false, "left": "issue:43906", "reason": "An isolated reproduction of one issue is not the same as a Qwen3.5 PagedAttentionCache crash on linear_attention.", "right": "issue:44530"}, {"accept": false, "left": "issue:43425", "reason": "Torch version incompatibility and an image preprocessor/model dtype mismatch are different bugs.", "right": "issue:43716"}], "summary": "Only one soft pair looks like a true duplicate: the two resize_token_embeddings reports. The other pairs are similar at a high level but appear to describe different bugs, models, or code paths."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:45292|issue:45335", "issue:44402|issue:44451", "issue:43645|issue:44164", "issue:43425|issue:43828", "issue:44265|issue:44898", "issue:43257|issue:44863", "issue:43828|issue:43957", "issue:44589|issue:44756", "issue:43906|issue:44530", "issue:43425|issue:43716"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6956, "estimated_input_tokens": 3350, "item_count": 18, "node_count": 18, "serialized_chars": 13400, "soft_pair_count": 10}, "cached_at": "2026-04-14T20:16:49Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "beb9be6c897cb739e0b2e2165ec68c832ca939d52d73d6e0c38249c1c5350729", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:39404", "issue:43299", "issue:43425", "issue:43596", "issue:43611", "issue:43645", "issue:43901", "issue:43931", "issue:44479", "issue:44509", "issue:44530", "issue:44589", "issue:44683", "issue:44805", 
"issue:44863", "issue:44912", "issue:44918", "issue:45084"], "result": {"analyst_result": {"best_issue_reason": "Issue #43611 is the most generally representative of the repeated 'v5 breaks model loading' theme and has a clearer, broader user impact than the other issues.", "best_pr_reason": null, "canonical_issue_reason": "If a single issue must represent the cluster, issue #43611 is the broadest and most central loading-regression report, but the overall set is too heterogeneous to have a strong true canonical issue.", "canonical_pr_reason": null, "confidence": 0.92, "soft_edge_verdicts": [{"accept": false, "left": "issue:39404", "reason": "Whisper pipeline return_language regression and MXFP4 quantization fallback are unrelated subsystems and failure modes.", "right": "issue:44912"}, {"accept": false, "left": "issue:43931", "reason": "Both are model-loading problems, but one is a Qwen3-VL weight-shape mismatch and the other is NemotronH checkpoint loading; no concrete shared code-path is evident.", "right": "issue:44863"}, {"accept": false, "left": "issue:43611", "reason": "Both mention Transformers 5.0.0 loading regressions, but one is base_model_prefix loading and the other is custom notebook model initialization; these are distinct bugs.", "right": "issue:43645"}, {"accept": false, "left": "issue:44530", "reason": "PagedAttentionCache linear_attention crash and compile-time non-template-node error are different failure classes with no sign of the same fix.", "right": "issue:45084"}, {"accept": false, "left": "issue:43901", "reason": "Both are documentation-related, but they target different pipeline/task docs and different API removals.", "right": "issue:44509"}, {"accept": false, "left": "issue:43299", "reason": "Both concern Qwen3-VL loading, but one is a MoE model-loading breakage and the other is a weight-shape mismatch for a different checkpoint; not the same bug.", "right": "issue:43931"}, {"accept": false, "left": "issue:44479", "reason": "A 
video-input regression for Qwen VL models and a mask/index shape mismatch are not the same concrete code-path issue.", "right": "issue:44805"}, {"accept": false, "left": "issue:44589", "reason": "Missing Float8 storage support and a compile failure on non-template nodes are unrelated errors.", "right": "issue:45084"}, {"accept": false, "left": "issue:43596", "reason": "Deepspeed ZeRO3/BertModel index error and compiled flex_attention failure on torch>=2.9 are distinct runtime problems.", "right": "issue:44683"}, {"accept": false, "left": "issue:43425", "reason": "Torch 2.10 incompatibility and Qwen3.5 embedding unpacking in TRL SFT trainer are unrelated issues.", "right": "issue:44918"}], "summary": "This cluster is mostly a set of unrelated Transformers issues that only loosely share themes like model loading regressions, docs cleanup, or runtime incompatibilities. None of the soft-paired items look like the same underlying bug or change, so the cluster should not be merged."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:39404|issue:44912", "issue:43931|issue:44863", "issue:43611|issue:43645", "issue:44530|issue:45084", "issue:43901|issue:44509", "issue:43299|issue:43931", "issue:44479|issue:44805", "issue:44589|issue:45084", "issue:43596|issue:44683", "issue:43425|issue:44918"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7140, "estimated_input_tokens": 3442, "item_count": 18, "node_count": 18, "serialized_chars": 13767, "soft_pair_count": 11}, "cached_at": "2026-04-14T20:17:15Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": 
"0642bea3e7f6ccab1856a75965842ff8ef57818f97426129758e699af573bc8f", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:32090", "issue:36032", "issue:39404", "issue:40990", "issue:42947", "issue:43452", "issue:43582", "issue:43632", "issue:43856", "issue:43957", "issue:43994", "issue:44079", "issue:44164", "issue:44488", "issue:44671", "issue:44756", "issue:44991", "issue:45081"], "result": {"analyst_result": {"best_issue_reason": "44991 is the most suitable single issue to anchor the cluster because it is specific, reproducible, and sits in the broad tokenizer/model-loading regression area shared by several candidates.", "best_pr_reason": null, "canonical_issue_reason": "issue:44991 is the closest umbrella representative because it is a concrete Transformers v5 tokenizer-loading regression, which is the most recurrent theme across the cluster. That said, the items are not true duplicates overall.", "canonical_pr_reason": null, "confidence": 0.81, "soft_edge_verdicts": [{"accept": false, "left": "issue:43856", "reason": "Both mention memory/OOM, but one is Qwen3 MoE training memory use and the other is Strix Halo mmap behavior; different root causes and fixes.", "right": "issue:44756"}, {"accept": false, "left": "issue:43994", "reason": "43994 is a model/pipeline output correctness bug for SigLIP2; 44079 is a generic ModelOutput key-assignment issue when a previous value was None. Not the same code path.", "right": "issue:44079"}, {"accept": false, "left": "issue:40990", "reason": "High perplexity in evaluation and ineffective gradient checkpointing are distinct training-quality and training-memory issues, not the same underlying defect.", "right": "issue:42947"}, {"accept": false, "left": "issue:44671", "reason": "One is incorrect CamemBERT MLM predictions in v5, the other is tokenizer loading failure for est-roberta. 
Similar version context, but different failure modes.", "right": "issue:44991"}, {"accept": false, "left": "issue:36032", "reason": "T5 tokenizer special-token conflict and gguf_file loading failures affect different loading paths and have different error signatures.", "right": "issue:43452"}, {"accept": false, "left": "issue:39404", "reason": "Whisper pipeline return_language regression and CamemBERT masked-LM prediction regression are unrelated model-specific behavior changes.", "right": "issue:44671"}, {"accept": false, "left": "issue:44991", "reason": "Both are tokenizer-loading regressions, but they hit different tokenizer implementations and different crashes; not enough evidence of one shared bug.", "right": "issue:45081"}, {"accept": false, "left": "issue:43582", "reason": "Apple Silicon caching_allocator_warmup TypeError and the _is_hf_initialized flag regression are separate issues in different subsystems.", "right": "issue:43632"}, {"accept": false, "left": "issue:44488", "reason": "Both are model/tokenizer loading problems, but sleng-bert loading and Mistral regex/backend_tokenizer crashing are different model-specific failures.", "right": "issue:45081"}, {"accept": false, "left": "issue:43957", "reason": "Meta-device loading regressions and save/from_pretrained extra_state handling are distinct serialization/loading bugs.", "right": "issue:44164"}, {"accept": false, "left": "issue:32090", "reason": "Trainer _gpu_broadcast_one NoneType error and Whisper pipeline return_language failure are unrelated.", "right": "issue:39404"}], "summary": "This cluster is heterogeneous: the soft pairs are broadly theme-similar (loading, tokenizer, training, memory, v5 regressions) but do not look like the same underlying bug/change. 
I would not merge any of the candidate pairs."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43856|issue:44756", "issue:43994|issue:44079", "issue:40990|issue:42947", "issue:44671|issue:44991", "issue:36032|issue:43452", "issue:39404|issue:44671", "issue:44991|issue:45081", "issue:43582|issue:43632", "issue:44488|issue:45081", "issue:43957|issue:44164", "issue:32090|issue:39404"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6456, "estimated_input_tokens": 3100, "item_count": 17, "node_count": 17, "serialized_chars": 12398, "soft_pair_count": 9}, "cached_at": "2026-04-14T20:19:22Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "70be9712436004a530c298d5236bf5092e41a7db3d41356f4a4cfa0135159eb8", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:40990", "issue:43421", "issue:43425", "issue:43611", "issue:43644", "issue:43716", "issue:43883", "issue:44164", "issue:44206", "issue:44220", "issue:44265", "issue:44479", "issue:44488", "issue:44610", "issue:44987", "issue:45084", "issue:45161"], "result": {"analyst_result": {"best_issue_reason": "44206 is the most actionable issue in the set and the only one that plausibly serves as a duplicate target.", "best_pr_reason": null, "canonical_issue_reason": "44206 is the best anchor because it is the more specific report and appears to describe the underlying crash path that 44220 is also pointing at.", "canonical_pr_reason": null, "confidence": 0.34, "soft_edge_verdicts": [{"accept": false, "left": "issue:40990", "reason": "Different subsystems and failures: perplexity on GPT-OSS vs runtime special-token/post-processor updates.", 
"right": "issue:43421"}, {"accept": false, "left": "issue:43425", "reason": "Both mention model/runtime compatibility, but one is a Torch-version incompatibility and the other is a GPT-OSS MoE tensor-parallelism issue.", "right": "issue:45161"}, {"accept": false, "left": "issue:43883", "reason": "Distinct problems: missing attribute on MolmoForCausalLM vs failure to load a specific model checkpoint.", "right": "issue:44488"}, {"accept": false, "left": "issue:44164", "reason": "Save/from_pretrained extra_state handling and video-input regression are unrelated code paths.", "right": "issue:44479"}, {"accept": false, "left": "issue:44265", "reason": "torch.export/torch_compilable_check failure is unrelated to OmDet-Turbo processor output-size mismatch.", "right": "issue:44610"}, {"accept": false, "left": "issue:44488", "reason": "Both are load failures, but for different models and likely different root causes; too broad to merge as one bug.", "right": "issue:44987"}, {"accept": false, "left": "issue:43716", "reason": "Image-preprocessor dtype mismatch and template-node compilation failure are unrelated.", "right": "issue:45084"}, {"accept": true, "left": "issue:44206", "reason": "Both point to the same audio feature-extraction path and likely the same fbank/center-arg crash regression.", "right": "issue:44220"}, {"accept": false, "left": "issue:43611", "reason": "Two separate Transformers 5.0 regressions with different symptoms and code paths: base_model_prefix loading vs non-persistent buffers.", "right": "issue:43644"}], "summary": "This cluster is mostly heterogeneous: the soft-similarity links generally connect unrelated bugs. 
Only the audio feature-extraction pair (44206/44220) looks plausibly like the same underlying regression."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:40990|issue:43421", "issue:43425|issue:45161", "issue:43883|issue:44488", "issue:44164|issue:44479", "issue:44265|issue:44610", "issue:44488|issue:44987", "issue:43716|issue:45084", "issue:44206|issue:44220", "issue:43611|issue:43644"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6484, "estimated_input_tokens": 3114, "item_count": 17, "node_count": 17, "serialized_chars": 12453, "soft_pair_count": 9}, "cached_at": "2026-04-14T20:20:05Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "6401fa5ec410bd697a9a87777a14feee3afa1119a11fc40a06565059d9332704", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:34567", "issue:40444", "issue:42947", "issue:43317", "issue:43452", "issue:43482", "issue:43541", "issue:43643", "issue:43644", "issue:43646", "issue:43883", "issue:44451", "issue:44530", "issue:44756", "issue:44987", "issue:44991", "issue:45084"], "result": {"analyst_result": {"best_issue_reason": "Issue 43452 is the strongest canonical candidate: specific, actionable, and broad enough to represent the shared GGUF loading failure without being overly generic.", "best_pr_reason": null, "canonical_issue_reason": "Issue 43452 is the clearest concrete bug report and best umbrella for the GGUF auto-loading regression, covering both tokenizer and causal LM load paths.", "canonical_pr_reason": null, "confidence": 0.78, "soft_edge_verdicts": [{"accept": false, "left": "issue:42947", "reason": "Different problems: LoRA/gradient 
checkpointing behavior vs mmap/OOM on Strix Halo.", "right": "issue:44756"}, {"accept": false, "left": "issue:34567", "reason": "Unrelated training-state metric bug vs multimodal IterableDataset failure.", "right": "issue:40444"}, {"accept": false, "left": "issue:43883", "reason": "Different model-loading failures with different symptoms and likely different code paths.", "right": "issue:44991"}, {"accept": true, "left": "issue:43452", "reason": "Both describe the same GGUF loading regression in the v5 auto-loading path; 43482 looks like a specific model instance of the broader 43452 bug.", "right": "issue:43482"}, {"accept": false, "left": "issue:43541", "reason": "Torch dynamo/grouped_mm tracing error is unrelated to a template compilation TypeError.", "right": "issue:45084"}, {"accept": false, "left": "issue:44451", "reason": "Both are loading issues, but the models and failure modes are different enough to suggest separate bugs.", "right": "issue:44987"}, {"accept": false, "left": "issue:43643", "reason": "Remote-code config field loss is unrelated to PagedAttentionCache linear_attention crashes.", "right": "issue:44530"}, {"accept": false, "left": "issue:43644", "reason": "Both mention Transformers 5.0.0 regressions, but one is about non-persistent buffers and the other about custom model initialization; not the same concrete bug.", "right": "issue:43646"}, {"accept": false, "left": "issue:43317", "reason": "Dequantized model offload failure is a different loading/offload path than GGUF model loading.", "right": "issue:43482"}], "summary": "Most items are unrelated bugs spanning training metrics, model loading, cache behavior, and v5 regressions. 
The only plausible duplicate pair is the GGUF loading regression affecting auto-loading paths."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:42947|issue:44756", "issue:34567|issue:40444", "issue:43883|issue:44991", "issue:43452|issue:43482", "issue:43541|issue:45084", "issue:44451|issue:44987", "issue:43643|issue:44530", "issue:43644|issue:43646", "issue:43317|issue:43482"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7080, "estimated_input_tokens": 3412, "item_count": 18, "node_count": 18, "serialized_chars": 13647, "soft_pair_count": 10}, "cached_at": "2026-04-14T20:20:17Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "08fc55ec248ca86e1b97d9c30e1ce47ad499fcc721a3249c6c387ef2ec9920d4", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:36032", "issue:39404", "issue:42915", "issue:43262", "issue:43377", "issue:43404", "issue:43645", "issue:43646", "issue:43825", "issue:44403", "issue:44466", "issue:44530", "issue:44534", "issue:44849", "issue:44987", "issue:44991", "issue:45081", "issue:45325"], "result": {"analyst_result": {"best_issue_reason": "issue:43645 is the best anchor issue because it describes a general, high-impact regression rather than a model-specific edge case.", "best_pr_reason": null, "canonical_issue_reason": "issue:43645 is the broadest, most representative user-facing v5 regression here (custom model initialization breaking), but the set does not form a coherent duplicate cluster overall.", "canonical_pr_reason": null, "confidence": 0.94, "soft_edge_verdicts": [{"accept": false, "left": "issue:36032", "reason": "Tokenizer-load conflict in T5 is 
unrelated to Qwen3Moe FP8 configuration failure.", "right": "issue:42915"}, {"accept": false, "left": "issue:43262", "reason": "Both are audio-related, but one is a chat template sampling-rate bug and the other is a batching/padding-mask encoder bug.", "right": "issue:43377"}, {"accept": false, "left": "issue:44849", "reason": "Different Qwen model families and different failure modes: hidden-states output vs rope/position-id scaling.", "right": "issue:45325"}, {"accept": false, "left": "issue:39404", "reason": "Both involve pipeline/Whisper/translation, but one is a functional return_language regression and the other is an error-message / task-support messaging issue.", "right": "issue:43825"}, {"accept": false, "left": "issue:44987", "reason": "Both are loading failures under v5, but they affect different models/tokenizers and do not appear to share the same concrete code path.", "right": "issue:44991"}, {"accept": false, "left": "issue:42915", "reason": "Qwen3Moe FP8 failure and Qwen3.5 PagedAttentionCache crash are distinct bugs with different triggers and fixes.", "right": "issue:44530"}, {"accept": false, "left": "issue:36032", "reason": "T5 tokenizer special-token conflict is not the same as custom model initialization breaking in Jupyter.", "right": "issue:43645"}, {"accept": false, "left": "issue:43404", "reason": "Untied lm_head in Mistral3 is a model-specific tying bug; inconsistent serialization by device is a separate save/load behavior issue.", "right": "issue:44466"}, {"accept": false, "left": "issue:43646", "reason": "Custom model initialization breakage and non-persistent buffer junk initialization are different regressions.", "right": "issue:44534"}, {"accept": false, "left": "issue:44403", "reason": "Generic load noise is not the same as the Mistral tokenizer backend_tokenizer crash with fix_mistral_regex=True.", "right": "issue:45081"}], "summary": "The cluster is mostly a set of unrelated Transformer v5 regression reports across different 
models and subsystems. None of the proposed soft edges look like true duplicates of the same underlying bug/change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:36032|issue:42915", "issue:43262|issue:43377", "issue:44849|issue:45325", "issue:39404|issue:43825", "issue:44987|issue:44991", "issue:42915|issue:44530", "issue:36032|issue:43645", "issue:43404|issue:44466", "issue:43646|issue:44534", "issue:44403|issue:45081"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6778, "estimated_input_tokens": 3261, "item_count": 18, "node_count": 18, "serialized_chars": 13041, "soft_pair_count": 9}, "cached_at": "2026-04-14T20:20:39Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "fefd9be2297c33e8046aed6501851bb0179680f2c18ed084e44541d7c67753b1", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:41553", "issue:42915", "issue:43322", "issue:43377", "issue:43540", "issue:43582", "issue:43645", "issue:43819", "issue:43994", "issue:44220", "issue:44451", "issue:44534", "issue:44610", "issue:44683", "issue:45081", "issue:45084", "issue:45216", "issue:45357"], "result": {"analyst_result": {"best_issue_reason": "Issue 45216 is the best representative of the only genuine duplicate thread in this batch: it states the broader Qwen3.5 save_pretrained regression that 45357 narrows down to incorrect visual encoder keys.", "best_pr_reason": null, "canonical_issue_reason": "No single canonical issue for the whole set; the only true duplicate pair is 45216/45357, and 45216 is the better anchor because it is broader and earlier.", "canonical_pr_reason": null, "confidence": 0.88, 
"soft_edge_verdicts": [{"accept": false, "left": "issue:44451", "reason": "Both are tokenizer-loading failures, but they target different models and different root causes: ScandiBERT load failure vs Mistral regex patch crash.", "right": "issue:45081"}, {"accept": false, "left": "issue:43994", "reason": "Both involve model/processor output mismatches, but the affected models and failure modes are unrelated (SigLIP2 AutoModel/pipeline vs OmDet-Turbo image size mismatch).", "right": "issue:44610"}, {"accept": false, "left": "issue:43322", "reason": "Different multimodal bugs with different code paths: Llava Next loading segfault vs Qwen3OmniMoe video-input ValueError.", "right": "issue:43540"}, {"accept": false, "left": "issue:43645", "reason": "Both are Transformers v5 regressions, but one is Jupyter custom-model initialization and the other is non-persistent buffer initialization; not the same bug.", "right": "issue:44534"}, {"accept": true, "left": "issue:45216", "reason": "Same underlying Qwen3.5 save_pretrained regression: both report incorrect saved checkpoint contents, with 45357 adding the specific visual-encoder-key symptom.", "right": "issue:45357"}, {"accept": false, "left": "issue:44220", "reason": "Unrelated issues: one is about _torch_extract_fbank_features, the other about compiled flex_attention on torch >= 2.9.", "right": "issue:44683"}, {"accept": false, "left": "issue:43377", "reason": "Both concern audio/model correctness, but they are different components and bugs: MIMI padding-mask batching vs DAC from_latents/STE mismatch.", "right": "issue:43819"}, {"accept": false, "left": "issue:41553", "reason": "Completely different problems: Voxtral AutoTokenizer error-message quality vs Apple Silicon caching_allocator_warmup TypeError.", "right": "issue:43582"}, {"accept": false, "left": "issue:42915", "reason": "No shared bug or code path: Qwen3Moe FineGrainedFP8Config failure vs a template-compilation TypeError.", "right": "issue:45084"}], "summary": 
"This set is mostly not a single duplicate cluster: the items span unrelated bugs across tokenizers, multimodal models, audio, compilation, and platform-specific failures. The only strong duplicate-looking pair is the Qwen3.5 save_pretrained regression (45216/45357), where 45357 appears to be a more specific follow-up to the same checkpoint-key bug."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44451|issue:45081", "issue:43994|issue:44610", "issue:43322|issue:43540", "issue:43645|issue:44534", "issue:45216|issue:45357", "issue:44220|issue:44683", "issue:43377|issue:43819", "issue:41553|issue:43582", "issue:42915|issue:45084"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6630, "estimated_input_tokens": 3187, "item_count": 17, "node_count": 17, "serialized_chars": 12748, "soft_pair_count": 9}, "cached_at": "2026-04-14T20:20:53Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "730389e49cbda655f46f79089c1440ca8ade92a3ecdc38c1dfd7b3fcdfb614c3", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:40990", "issue:42915", "issue:43296", "issue:43404", "issue:43531", "issue:43632", "issue:44062", "issue:44265", "issue:44291", "issue:44479", "issue:44756", "issue:44811", "issue:44987", "issue:45072", "issue:45081", "issue:45127", "issue:45325"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "issue:43632", "reason": "Different bugs: one is an _is_hf_initialized/v5 compatibility regression, the other is an mmap/OOM 
platform issue.", "right": "issue:44756"}, {"accept": false, "left": "issue:44987", "reason": "Both involve model loading, but they fail in different tokenizer paths and for different models; not the same underlying bug.", "right": "issue:45081"}, {"accept": false, "left": "issue:43296", "reason": "Different targets and failures: PaddleOCR-VL load failure vs Qwen3-MoE sliding_window behavior.", "right": "issue:43531"}, {"accept": false, "left": "issue:43404", "reason": "One is lm_head weight tying in Mistral3ForConditionalGeneration; the other is LoRA merge collapse with extended vocab. Distinct code paths.", "right": "issue:45127"}, {"accept": false, "left": "issue:44062", "reason": "AddedToken constructor error is unrelated to the physical-intelligence/fast load failure.", "right": "issue:44987"}, {"accept": false, "left": "issue:44479", "reason": "Both are Qwen VL regressions, but one is video-input handling and the other is still-image rope/index scaling; not the same defect.", "right": "issue:45325"}, {"accept": false, "left": "issue:40990", "reason": "Perplexity on GPT-OSS and Whisper batch_decode skip_special_tokens are unrelated bugs.", "right": "issue:44811"}, {"accept": false, "left": "issue:44265", "reason": "torch.export with torch_compilable_check and bfloat16 dtype mismatches are different failure modes.", "right": "issue:45072"}, {"accept": false, "left": "issue:42915", "reason": "FP8 config failure for Qwen3Moe is separate from the _is_hf_initialized/init_empty_weights argument error.", "right": "issue:44291"}], "summary": "This cluster is heterogeneous: the issues touch different models, regressions, and code paths, so there is no true duplicate core to consolidate."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43632|issue:44756", "issue:44987|issue:45081", "issue:43296|issue:43531", "issue:43404|issue:45127", "issue:44062|issue:44987", 
"issue:44479|issue:45325", "issue:40990|issue:44811", "issue:44265|issue:45072", "issue:42915|issue:44291"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6536, "estimated_input_tokens": 3140, "item_count": 17, "node_count": 17, "serialized_chars": 12559, "soft_pair_count": 9}, "cached_at": "2026-04-14T20:21:08Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "064827165742e80c8f19a5ea967b9d94c82212ac036d9f44af416d52f95c46e4", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:36032", "issue:40444", "issue:42915", "issue:43317", "issue:43421", "issue:43531", "issue:43611", "issue:43632", "issue:43742", "issue:43818", "issue:43856", "issue:44534", "issue:44568", "issue:44589", "issue:44877", "issue:45325", "issue:45357"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.93, "soft_edge_verdicts": [{"accept": false, "left": "issue:43317", "reason": "Both involve Qwen-related model behavior, but one is a device_map/offload loading failure and the other is MoE training memory usage; different code paths and fixes.", "right": "issue:43856"}, {"accept": false, "left": "issue:43611", "reason": "One is a base_model_prefix loading regression and the other is non-persistent buffers being initialized with junk; unrelated mechanisms.", "right": "issue:44534"}, {"accept": false, "left": "issue:42915", "reason": "Both touch float8/quantization-adjacent failures, but one is Qwen3MoE with FineGrainedFP8Config while the other is a storage-type TypeError; not the same bug.", "right": "issue:44589"}, {"accept": false, "left": "issue:43421", "reason": "Both concern special 
tokens, but one is runtime post-processor syncing and the other is add_special_tokens not adding BOS/EOS for a specific tokenizer; distinct issues.", "right": "issue:44568"}, {"accept": false, "left": "issue:36032", "reason": "Tokenizer method-name conflict vs key error loading a different model; no shared underlying code path.", "right": "issue:43742"}, {"accept": false, "left": "issue:43632", "reason": "_is_hf_initialized flag regression and junk-filled buffers are separate v5 breakages with different root causes.", "right": "issue:44534"}, {"accept": false, "left": "issue:40444", "reason": "Both are Qwen2.5-VL, but one is finetuning with multiple images per prompt and the other is rope-index scaling for still-image positions; different bugs.", "right": "issue:45325"}, {"accept": false, "left": "issue:43818", "reason": "Different models and different failures: Video-LLaVA video tower initialization vs Qwen3.5 save_pretrained visual encoder keys.", "right": "issue:45357"}, {"accept": false, "left": "issue:43531", "reason": "Sliding-window behavior in Qwen3-MoE and strict config parsing for granite_speech are unrelated.", "right": "issue:44877"}], "summary": "The set is a grab bag of unrelated loading, tokenizer, multimodal, quantization, and config issues. 
None of the soft pairs looks like the same underlying bug/change, so I would not consolidate any of them."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43317|issue:43856", "issue:43611|issue:44534", "issue:42915|issue:44589", "issue:43421|issue:44568", "issue:36032|issue:43742", "issue:43632|issue:44534", "issue:40444|issue:45325", "issue:43818|issue:45357", "issue:43531|issue:44877"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6844, "estimated_input_tokens": 3294, "item_count": 18, "node_count": 18, "serialized_chars": 13176, "soft_pair_count": 10}, "cached_at": "2026-04-14T20:21:26Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "637aa550913fa50ee6c748f41f49cc6d41c20e2bd1881914b19aabc794d02b77", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:33666", "issue:35707", "issue:36032", "issue:40444", "issue:41720", "issue:43425", "issue:43792", "issue:44079", "issue:44186", "issue:44265", "issue:44403", "issue:44534", "issue:44589", "issue:44683", "issue:44821", "issue:44991", "issue:45020", "issue:45084"], "result": {"analyst_result": {"best_issue_reason": "#44403 is the most general and still-open issue in the set, so it is the least specific representative. 
The rest are narrower or clearly different failure modes.", "best_pr_reason": null, "canonical_issue_reason": "No single issue cleanly represents the cluster; if one must be chosen, #44403 is the broadest open report and the closest to a generic Transformers-loading theme, but it is not a true duplicate hub.", "canonical_pr_reason": null, "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "issue:44683", "reason": "Different bugs: compiled flex_attention on torch>=2.9 vs remote_code breakage in recent Transformers.", "right": "issue:45020"}, {"accept": false, "left": "issue:44821", "reason": "Unrelated failures: loading AutoImageProcessor from a URL vs compilation of non-template nodes.", "right": "issue:45084"}, {"accept": false, "left": "issue:43425", "reason": "Torch 2.10 incompatibility is a version-specific runtime problem; the other is generic load-noise behavior.", "right": "issue:44403"}, {"accept": false, "left": "issue:36032", "reason": "Tokenizer metaclass/method conflict and Qwen3 CUDA assert are entirely different code paths.", "right": "issue:41720"}, {"accept": false, "left": "issue:43792", "reason": "Whisper model load/runtime failure does not match the Float8 storage lookup error.", "right": "issue:44589"}, {"accept": false, "left": "issue:44186", "reason": "LayoutLMv2 tokenizer crashes and torch.export/torch_compilable_check failures are distinct subsystems.", "right": "issue:44265"}, {"accept": false, "left": "issue:44403", "reason": "Both mention loading, but one is about noisy logging and the other is a tokenizer compatibility failure in v5.", "right": "issue:44991"}, {"accept": false, "left": "issue:33666", "reason": "Both involve Qwen-VL, but one is multi-GPU training while the other is iterable dataset/multi-image finetuning failure.", "right": "issue:40444"}, {"accept": false, "left": "issue:35707", "reason": "Progressive generation with inputs_embeds/past_key_values is unrelated to multi-image iterable-dataset 
finetuning.", "right": "issue:40444"}, {"accept": false, "left": "issue:44079", "reason": "ModelOutput key assignment and non-persistent buffer initialization are different internal bugs.", "right": "issue:44534"}], "summary": "This cluster is not a true duplicate set. The items cover unrelated failures across tokenizer loading, Qwen/VL finetuning, model output semantics, torch.export, device mapping, and version compatibility. The soft links are mostly keyword overlap around Transformers, not the same bug."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44683|issue:45020", "issue:44821|issue:45084", "issue:43425|issue:44403", "issue:36032|issue:41720", "issue:43792|issue:44589", "issue:44186|issue:44265", "issue:44403|issue:44991", "issue:33666|issue:40444", "issue:35707|issue:40444", "issue:44079|issue:44534"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7114, "estimated_input_tokens": 3429, "item_count": 18, "node_count": 18, "serialized_chars": 13715, "soft_pair_count": 11}, "cached_at": "2026-04-14T20:21:47Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "62ba72393751e03510cbea31248f6fa435441ddc0eae52766e334c2f2ad6d83b", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:41720", "issue:42175", "issue:43296", "issue:43482", "issue:43525", "issue:43531", "issue:43606", "issue:43644", "issue:43749", "issue:43761", "issue:43819", "issue:43881", "issue:44079", "issue:44265", "issue:44291", "issue:44488", "issue:44589", "issue:45081"], "result": {"analyst_result": {"best_issue_reason": "Issue 44079 has the broadest scope and the clearest reusable triage value among 
otherwise unrelated reports, so it is the best single issue to surface.", "best_pr_reason": null, "canonical_issue_reason": "Issue 44079 is the best representative if one issue must anchor the set: it describes a core library-level regression in ModelOutput assignment, which is broader than the more model-specific failures here.", "canonical_pr_reason": null, "confidence": 0.94, "soft_edge_verdicts": [{"accept": false, "left": "issue:43482", "reason": "Different failures and code paths: Qwen2.5-GGUF/transformers-v5 loading vs Llama4Config missing pad_token_id.", "right": "issue:43525"}, {"accept": false, "left": "issue:44291", "reason": "Both are TypeErrors, but the causes differ: init_empty_weights/_is_hf_initialized mismatch vs missing Float8 storage type.", "right": "issue:44589"}, {"accept": false, "left": "issue:43606", "reason": "CPU-offload device mismatch in bark-small is unrelated to torch.export export failures around torch_compilable_check.", "right": "issue:44265"}, {"accept": false, "left": "issue:43606", "reason": "Different model and failure mode: bark-small CPU offload device mismatch vs DAC.from_latents not matching forward due to missing STE.", "right": "issue:43819"}, {"accept": false, "left": "issue:43749", "reason": "FSDP CPU RAM efficient loading is a distributed-loading regression; ModelOutput key assignment is a separate container/output bug.", "right": "issue:44079"}, {"accept": false, "left": "issue:43761", "reason": "Both touch outputs, but one is CLIPVisionModel hidden_states behavior and the other is generic ModelOutput key assignment; not the same bug.", "right": "issue:44079"}, {"accept": false, "left": "issue:43644", "reason": "Non-persistent buffer corruption in Transformers 5.0.0 is a different regression from ModelOutput key handling.", "right": "issue:44079"}, {"accept": false, "left": "issue:42175", "reason": "Packaging/backend selection for pip extras vs a PaddleOCR-VL model load failure; unrelated subsystems and fixes.", 
"right": "issue:43296"}, {"accept": false, "left": "issue:43881", "reason": "glm-4v-9b loading failure and cjvt/sleng-bert loading failure are different model-specific issues.", "right": "issue:44488"}, {"accept": false, "left": "issue:41720", "reason": "Qwen3 auto device mapping CUDA assert on A800 is unrelated to the Mistral tokenizer regex patch crash.", "right": "issue:45081"}, {"accept": false, "left": "issue:43531", "reason": "Qwen3-MoE sliding_window behavior and init_empty_weights argument handling are different bugs with no shared concrete code path.", "right": "issue:44291"}], "summary": "The cluster is mostly a set of unrelated model-loading/regression issues; the soft-similarity links do not indicate the same underlying bug or change. No PRs are present, and none of the issue pairs are good duplicate merges."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43482|issue:43525", "issue:44291|issue:44589", "issue:43606|issue:44265", "issue:43606|issue:43819", "issue:43749|issue:44079", "issue:43761|issue:44079", "issue:43644|issue:44079", "issue:42175|issue:43296", "issue:43881|issue:44488", "issue:41720|issue:45081", "issue:43531|issue:44291"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6956, "estimated_input_tokens": 3350, "item_count": 18, "node_count": 18, "serialized_chars": 13399, "soft_pair_count": 10}, "cached_at": "2026-04-14T20:22:16Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "5fa1f30f22e562dc035068a28561475e7d54ec71c6b9b7743a84a6afe4108c5c", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:40444", "issue:42915", "issue:43454", "issue:43482", 
"issue:43493", "issue:43611", "issue:43632", "issue:43646", "issue:43828", "issue:43994", "issue:44387", "issue:44466", "issue:44610", "issue:44617", "issue:44683", "issue:45072", "issue:45325", "issue:45381"], "result": {"analyst_result": {"best_issue_reason": "45325 is the most actionable representative: it describes the underlying positional-indexing bug clearly, and the related reports are downstream manifestations in similar Qwen2.5-VL image/video paths.", "best_pr_reason": null, "canonical_issue_reason": "No single canonical for the entire set, but issue 45325 is the best anchor for the strongest subcluster because it states the concrete Qwen2.5-VL rope-index/position_id defect most directly.", "canonical_pr_reason": null, "confidence": 0.79, "soft_edge_verdicts": [{"accept": false, "left": "issue:40444", "reason": "Both are Qwen2.5-VL multimodal failures, but one is IterableDataset multi-image training breakage and the other is video vision_position_ids; too different to be the same bug.", "right": "issue:45381"}, {"accept": false, "left": "issue:44610", "reason": "Different models and different failures: OmDet-Turbo processor size mismatch vs Qwen2.5-VL rope-index scaling.", "right": "issue:45325"}, {"accept": true, "left": "issue:45325", "reason": "Same concrete Qwen2.5-VL positional-indexing path: still-image temporal position_ids vs video vision_position_ids both point to the same rope-index bug.", "right": "issue:45381"}, {"accept": false, "left": "issue:43828", "reason": "Unrelated failures: autocast dtype mismatch in Phi-tiny-MoE vs torch>=2.9 flex_attention compilation breakage.", "right": "issue:44683"}, {"accept": false, "left": "issue:44387", "reason": "Both mention OOM, but the causes are unrelated: int4 reserved-memory growth vs Sam3Video memory use.", "right": "issue:44617"}, {"accept": false, "left": "issue:43611", "reason": "Both are Transformers v5 loading/init regressions, but they affect different mechanisms: base_model_prefix loading vs 
custom model initialization.", "right": "issue:43646"}, {"accept": false, "left": "issue:43632", "reason": "Different bugs in different code paths: _is_hf_initialized flag handling vs dtype mismatch in SwitchTransformers/TimmWrapperModel.", "right": "issue:45072"}, {"accept": false, "left": "issue:42915", "reason": "Different models and different breakages: FineGrainedFP8Config on Qwen3Moe vs Qwen2.5-GGUF loading failure in v5.", "right": "issue:43482"}, {"accept": true, "left": "issue:43493", "reason": "Same underlying SigLIP2 implementation problem: HF output deviates from the original/JAX behavior and produces nonsensical downstream results.", "right": "issue:43994"}, {"accept": false, "left": "issue:43454", "reason": "Both involve lm_head/tied weights, but one is a model-specific weight-tying failure and the other is a broader device-dependent serialization inconsistency; not the same bug.", "right": "issue:44466"}], "summary": "Mostly a heterogeneous cluster with a few real duplicate-like pairs. 
The clearest duplicate subclusters are Qwen2.5-VL position-id/rope-index bugs and a SigLIP2 output-discrepancy pair; most other soft links are only thematically related, not the same bug."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:40444|issue:45381", "issue:44610|issue:45325", "issue:45325|issue:45381", "issue:43828|issue:44683", "issue:44387|issue:44617", "issue:43611|issue:43646", "issue:43632|issue:45072", "issue:42915|issue:43482", "issue:43493|issue:43994", "issue:43454|issue:44466"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6872, "estimated_input_tokens": 3308, "item_count": 18, "node_count": 18, "serialized_chars": 13230, "soft_pair_count": 9}, "cached_at": "2026-04-14T20:22:35Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "9b682366704ceb8192506bdb9d8ee3af7ee027509f3824eff87ea1681a95a546", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:34634", "issue:35707", "issue:43262", "issue:43317", "issue:43526", "issue:43746", "issue:43792", "issue:43873", "issue:43881", "issue:43883", "issue:43957", "issue:44186", "issue:44291", "issue:44877", "issue:44898", "issue:44991", "issue:45072", "issue:45325"], "result": {"analyst_result": {"best_issue_reason": "Issue 43873 is the best overall representative because it is the most generic and widely applicable report here, centered on offloading/quantization rather than a model-specific edge case.", "best_pr_reason": null, "canonical_issue_reason": "No single canonical duplicate stands out; if a representative is needed, issue 43873 is the broadest and most central model-loading/offloading report in the set.", 
"canonical_pr_reason": null, "confidence": 0.93, "soft_edge_verdicts": [{"accept": false, "left": "issue:43792", "reason": "Both are loading-related, but they concern different models and different failure modes (Whisper runtime failure vs meta-device loading breakage).", "right": "issue:43957"}, {"accept": false, "left": "issue:43526", "reason": "These are unrelated bugs in different components: image processor label reduction vs a model attribute error during loading.", "right": "issue:43883"}, {"accept": false, "left": "issue:44186", "reason": "Tokenizer crash on NER/padding is unrelated to dtype mismatches in bfloat16 inference.", "right": "issue:45072"}, {"accept": false, "left": "issue:43881", "reason": "Both involve loading failures, but one is a model load failure and the other is a tokenizer load failure with different likely causes.", "right": "issue:44991"}, {"accept": false, "left": "issue:43317", "reason": "Both mention offloading/quantization, but the titles point to different concrete failures: dequantized model loading with device_map=auto vs general offloading behavior with quantization.", "right": "issue:43873"}, {"accept": false, "left": "issue:43262", "reason": "Audio processor chat-template sampling-rate defaults are unrelated to GraniteSpeech PEFT checkpoint loading.", "right": "issue:43746"}, {"accept": false, "left": "issue:44898", "reason": "Both touch vision position handling, but they are different code paths and models: Perceiver interpolation vs Qwen2.5-VL rope/temporal indexing.", "right": "issue:45325"}, {"accept": false, "left": "issue:44291", "reason": "One is an init_empty_weights argument incompatibility; the other is strict config validation blocking granite_speech config loading.", "right": "issue:44877"}, {"accept": false, "left": "issue:34634", "reason": "BarkProcessor voice_preset and progressive generation with inputs_embeds/past_key_values are unrelated bugs.", "right": "issue:35707"}], "summary": "These issues span 
unrelated bugs across audio, tokenization, vision, model loading, quantization/offload, and config handling. None of the soft pairs look like true duplicates; at most a few are in the same broad area, but not the same concrete bug."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43792|issue:43957", "issue:43526|issue:43883", "issue:44186|issue:45072", "issue:43881|issue:44991", "issue:43317|issue:43873", "issue:43262|issue:43746", "issue:44898|issue:45325", "issue:44291|issue:44877", "issue:34634|issue:35707"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7062, "estimated_input_tokens": 3403, "item_count": 18, "node_count": 18, "serialized_chars": 13609, "soft_pair_count": 11}, "cached_at": "2026-04-14T20:22:46Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "787130400ce0fe913bd9b6b444a63ac537184620f028900fcf0f15ec01ace30d", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:35707", "issue:39692", "issue:40444", "issue:41720", "issue:43296", "issue:43377", "issue:43550", "issue:43632", "issue:43720", "issue:43749", "issue:43792", "issue:44164", "issue:44534", "issue:44610", "issue:44683", "issue:44877", "issue:44898", "issue:44991"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.94, "soft_edge_verdicts": [{"accept": false, "left": "issue:43550", "reason": "Both involve model/runtime failures, but one is a torch.compile+SDPA bug in Bamba-9B-v2 and the other is a packed-weights loading bug in BitNet AutoBitLinear; different code paths and likely different fixes.", 
"right": "issue:43720"}, {"accept": false, "left": "issue:43632", "reason": "Both relate to loading/saving behavior, but one is about the `_is_hf_initialized` flag in Transformers v5 and the other is about preserving `extra_state`; these are distinct serialization issues.", "right": "issue:44164"}, {"accept": false, "left": "issue:35707", "reason": "Progressive generation with `inputs_embeds`/`past_key_values` is unrelated to a Qwen3 auto device mapping cuda assert; different subsystems and failure modes.", "right": "issue:41720"}, {"accept": false, "left": "issue:44610", "reason": "Both are vision-model shape/processor complaints, but one is an OmDet-Turbo processor/model resolution mismatch and the other is a Perceiver interpolation failure with non-default image size; not the same bug.", "right": "issue:44898"}, {"accept": false, "left": "issue:43296", "reason": "Both mention loading/config compatibility in v5-era Transformers, but one is PaddleOCR-VL loading in vLLM and the other is strict config handling for granite_speech; separate model-specific breakages.", "right": "issue:44877"}, {"accept": false, "left": "issue:43296", "reason": "These are different loading failures: PaddleOCR-VL in vLLM versus tokenizer loading for EMBEDDIA/est-roberta. 
Shared version context is not enough to merge them.", "right": "issue:44991"}, {"accept": false, "left": "issue:43377", "reason": "MIMI padding-mask batch inconsistency and OmDet-Turbo processor/model size mismatch are different bug classes affecting different components.", "right": "issue:44610"}, {"accept": false, "left": "issue:43792", "reason": "Whisper-large-v2 runtime failure and compiled flex_attention failing on torch>=2.9 are unrelated; one is model execution, the other is an attention/compiler compatibility regression.", "right": "issue:44683"}, {"accept": false, "left": "issue:43377", "reason": "Batch-vs-single output drift from missing padding mask support is not the same as Perceiver failing on non-default resolution despite interpolation; separate models and root causes.", "right": "issue:44898"}, {"accept": false, "left": "issue:39692", "reason": "SigLIP2 doc example errors and Qwen2.5-VL finetuning with multiple images per prompt are unrelated documentation/example versus training pipeline issues.", "right": "issue:40444"}, {"accept": false, "left": "issue:43749", "reason": "Both concern Transformers v5 loading/state behavior, but FSDP CPU RAM efficient loading and non-persistent buffers filled with junk are different mechanisms and fixes.", "right": "issue:44534"}], "summary": "This cluster is heterogeneous: the issue titles point to different bugs/regressions across unrelated models, loading paths, and runtime features. 
None of the soft pairs looks like the same concrete underlying defect, so all soft matches should be rejected."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43550|issue:43720", "issue:43632|issue:44164", "issue:35707|issue:41720", "issue:44610|issue:44898", "issue:43296|issue:44877", "issue:43296|issue:44991", "issue:43377|issue:44610", "issue:43792|issue:44683", "issue:43377|issue:44898", "issue:39692|issue:40444", "issue:43749|issue:44534"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7176, "estimated_input_tokens": 3460, "item_count": 18, "node_count": 18, "serialized_chars": 13840, "soft_pair_count": 11}, "cached_at": "2026-04-14T20:23:29Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "830d69ffe544fb93dab37dd06829e32623d15b9a61a3ff9de0072a2f8052e662", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:34567", "issue:40990", "issue:41720", "issue:43322", "issue:43404", "issue:43454", "issue:43526", "issue:43550", "issue:43606", "issue:43881", "issue:44186", "issue:44423", "issue:44466", "issue:44610", "issue:44977", "issue:45072", "issue:45357", "issue:45406"], "result": {"analyst_result": {"best_issue_reason": "issue:44466 has the cleanest bug definition and most actionable reproduction surface among the set.", "best_pr_reason": null, "canonical_issue_reason": "issue:44466 is the strongest representative because it is a concrete regression with a clear, well-scoped serialization/tied-weights failure and broad practical impact.", "canonical_pr_reason": null, "confidence": 0.89, "soft_edge_verdicts": [{"accept": false, "left": "issue:43550", "reason": "Different 
failures and mechanisms: torch.compile/SDPA vs CPU offload device mismatch; same broad CI/model area only.", "right": "issue:43606"}, {"accept": false, "left": "issue:44466", "reason": "Both involve save/pretrained behavior, but one is tied-weight serialization and the other is incorrect visual encoder keys in Qwen3.5; not the same bug.", "right": "issue:45357"}, {"accept": false, "left": "issue:43454", "reason": "Different model families and symptoms: lm_head tying bug vs bfloat16 dtype mismatch in inference.", "right": "issue:45072"}, {"accept": false, "left": "issue:43454", "reason": "Unrelated concrete problems: weight tying in a multimodal model vs compile/SDPA failure in Bamba.", "right": "issue:43550"}, {"accept": false, "left": "issue:44423", "reason": "Both are serve crashes, but the error locations differ (`str.to` vs missing `_tokenizer`) and point to different processor bugs.", "right": "issue:45406"}, {"accept": false, "left": "issue:41720", "reason": "Different models and failure modes: cudaErrorAssert during auto device mapping vs segmentation fault when loading Llava Next.", "right": "issue:43322"}, {"accept": false, "left": "issue:44186", "reason": "Different components: tokenizer crashes on NER/padding vs processor image-size mismatch for a model.", "right": "issue:44610"}, {"accept": false, "left": "issue:41720", "reason": "Same model family only; one is an auto-device-map loading/assert issue, the other is flash-attention generation quality/behavior.", "right": "issue:44977"}, {"accept": false, "left": "issue:43404", "reason": "Both mention image-text models, but one is an lm_head tying bug and the other is a save_pretrained key regression.", "right": "issue:45357"}, {"accept": false, "left": "issue:43526", "reason": "Completely different failures: BeitImageProcessorFast label reduction vs glm-4v-9b loading failure.", "right": "issue:43881"}, {"accept": false, "left": "issue:34567", "reason": "No meaningful overlap: TrainerState token counter 
bug vs perplexity on a specific model/dataset.", "right": "issue:40990"}], "summary": "This cluster is very heterogeneous: the issues share only loose themes like multimodal/model-loading/training bugs, but they appear to describe distinct code paths and failures. I would not merge any of the soft pairs."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43550|issue:43606", "issue:44466|issue:45357", "issue:43454|issue:45072", "issue:43454|issue:43550", "issue:44423|issue:45406", "issue:41720|issue:43322", "issue:44186|issue:44610", "issue:41720|issue:44977", "issue:43404|issue:45357", "issue:43526|issue:43881", "issue:34567|issue:40990"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6532, "estimated_input_tokens": 3138, "item_count": 17, "node_count": 17, "serialized_chars": 12552, "soft_pair_count": 9}, "cached_at": "2026-04-14T20:23:39Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "86a0310b5a1e9daabe28213442bcd9ac984a9418d99e5a32f934daf9f229a084", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:42617", "issue:42915", "issue:43377", "issue:43454", "issue:43577", "issue:43606", "issue:43708", "issue:43720", "issue:43873", "issue:44186", "issue:44361", "issue:44479", "issue:44756", "issue:44811", "issue:44877", "issue:44918", "issue:44991"], "result": {"analyst_result": {"best_issue_reason": "Issue #43873 is the best standalone canonical candidate because it is an open, broadly described bug around offloading and quantization, which could absorb related follow-ups if any existed.", "best_pr_reason": null, "canonical_issue_reason": "If one issue must serve as the closest 
representative, issue #43873 is the broadest and most central-looking open bug report, but the set does not form a single duplicate cluster.", "canonical_pr_reason": null, "confidence": 0.92, "soft_edge_verdicts": [{"accept": false, "left": "issue:43454", "reason": "Different models and failure modes: AyaVision weight tying vs BitNet packed-weight unpacking during accelerate loading.", "right": "issue:43720"}, {"accept": false, "left": "issue:43873", "reason": "Both mention offloading/memory, but one is quantization behavior and the other is Strix Halo mmap OOM; different root causes and contexts.", "right": "issue:44756"}, {"accept": false, "left": "issue:43577", "reason": "Blip2 dtype loading bug vs Trainer checkpoint-resume max_steps calculation bug; unrelated subsystems.", "right": "issue:43708"}, {"accept": false, "left": "issue:43377", "reason": "MIMI encoder padding-mask batching issue vs Whisper batch_decode skip_special_tokens bug; different components and defects.", "right": "issue:44811"}, {"accept": false, "left": "issue:44186", "reason": "Both are tokenizer bugs, but they affect different tokenizers and different crash paths, so they are not the same underlying issue.", "right": "issue:44361"}, {"accept": false, "left": "issue:44877", "reason": "Strict config loading for granite_speech vs tokenizer loading for EMBEDDIA/est-roberta; different model/config compatibility problems.", "right": "issue:44991"}, {"accept": false, "left": "issue:42617", "reason": "A 3d_parallel.py execution problem and a Qwen3Moe FineGrainedFP8Config failure are distinct issues with no concrete shared code path.", "right": "issue:42915"}, {"accept": false, "left": "issue:44479", "reason": "Video-input regression across Qwen VL variants vs Qwen3.5 embedding unpacking with TRL SFT trainer; different features and code paths.", "right": "issue:44918"}, {"accept": false, "left": "issue:43606", "reason": "CPU offload device mismatch in bark-small vs packed-weight unpacking for 
BitNet; both touch loading/offload, but not the same bug.", "right": "issue:43720"}], "summary": "The items are mostly unrelated bug reports across different models, tokenizers, training, and loading paths. None of the soft pairs are strong enough to treat as duplicates; they share broad subsystem language at most, not the same underlying defect."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43454|issue:43720", "issue:43873|issue:44756", "issue:43577|issue:43708", "issue:43377|issue:44811", "issue:44186|issue:44361", "issue:44877|issue:44991", "issue:42617|issue:42915", "issue:44479|issue:44918", "issue:43606|issue:43720"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6862, "estimated_input_tokens": 3303, "item_count": 18, "node_count": 18, "serialized_chars": 13211, "soft_pair_count": 10}, "cached_at": "2026-04-14T20:24:02Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "f88443c9e59af7916cbc9f1994f51f5a1fdbacd3aa2fce0dd3d0a9a916dce0e3", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:36331", "issue:42617", "issue:42915", "issue:42994", "issue:43316", "issue:43317", "issue:43482", "issue:43531", "issue:43550", "issue:43572", "issue:43653", "issue:44292", "issue:44479", "issue:44671", "issue:44743", "issue:44898", "issue:44913", "issue:45072"], "result": {"analyst_result": {"best_issue_reason": "Issue 44479 is the strongest standalone representative: it describes a concrete regression, names multiple affected model variants, and is more broadly scoped than the other reports. 
Still, it does not duplicate the others.", "best_pr_reason": null, "canonical_issue_reason": "No single canonical duplicate emerges; the issues span distinct bugs across different models and subsystems, and the soft pairs are only superficial text similarity.", "canonical_pr_reason": null, "confidence": 0.93, "soft_edge_verdicts": [{"accept": false, "left": "issue:42994", "reason": "Both involve quantized/model-format workflows, but one is about saving quantized models and the other about GGUF loading failure in v5; different code paths and failure modes.", "right": "issue:43482"}, {"accept": false, "left": "issue:44671", "reason": "Completely different bugs: CamemBERT masked-LM prediction errors versus Qwen3.5 recurrent state reset when using cache.", "right": "issue:44743"}, {"accept": false, "left": "issue:36331", "reason": "Training API signature mismatch in CustomTrainer is unrelated to a Qwen3MoE + FineGrainedFP8Config failure.", "right": "issue:42915"}, {"accept": false, "left": "issue:42915", "reason": "One is a FineGrainedFP8Config/model failure; the other is a device_map/offload dequantized-model loading issue. 
Same broad area, different bug.", "right": "issue:43317"}, {"accept": false, "left": "issue:36331", "reason": "Unexpected compute_loss keyword argument error is unrelated to running 3d_parallel.py.", "right": "issue:42617"}, {"accept": false, "left": "issue:43316", "reason": "Both are config-related, but one is a Gemma3TextConfig API discrepancy and the other is GPTNeoX rotary_pct not persisting on reload.", "right": "issue:44913"}, {"accept": false, "left": "issue:43550", "reason": "Bamba torch.compile/SDPA failure and BigBirdTokenizer special-token registration are unrelated subsystems and failure modes.", "right": "issue:43653"}, {"accept": false, "left": "issue:44898", "reason": "Perceiver non-default-resolution image classification failure is not the same bug as bf16 dtype mismatches in SwitchTransformers/TimmWrapperModel.", "right": "issue:45072"}, {"accept": false, "left": "issue:43531", "reason": "Qwen3-MoE sliding_window behavior and StableLmConfig missing pad_token_idx are distinct model/config issues.", "right": "issue:43572"}, {"accept": false, "left": "issue:44292", "reason": "Both mention Qwen-family models, but one is an NVFP4 runtime/loading error and the other is a video-input regression across several VL/MoE models.", "right": "issue:44479"}], "summary": "The set is heterogeneous: training API mismatch, quantization/loading issues, config serialization bugs, tokenizer regressions, and several model-specific inference failures. 
None of the soft pairs look like the same underlying bug, so there are no duplicate merges here."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:42994|issue:43482", "issue:44671|issue:44743", "issue:36331|issue:42915", "issue:42915|issue:43317", "issue:36331|issue:42617", "issue:43316|issue:44913", "issue:43550|issue:43653", "issue:44898|issue:45072", "issue:43531|issue:43572", "issue:44292|issue:44479"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6634, "estimated_input_tokens": 3189, "item_count": 17, "node_count": 17, "serialized_chars": 12754, "soft_pair_count": 10}, "cached_at": "2026-04-14T20:24:19Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "46c19de58603e3e3df57b617f0a6a0d142a1d0aa864a7660db933da8ee1218f9", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:33357", "issue:35707", "issue:36010", "issue:41720", "issue:42617", "issue:42915", "issue:43322", "issue:43388", "issue:43540", "issue:43606", "issue:43653", "issue:43782", "issue:43873", "issue:44514", "issue:44545", "issue:45059", "issue:45161"], "result": {"analyst_result": {"best_issue_reason": "issue:44514 is the best representative issue in this set because it anchors the only confirmed duplicate pair and has the richer report. 
The rest of the items are too unrelated to serve as a single cluster representative.", "best_pr_reason": null, "canonical_issue_reason": "issue:44514 is the strongest canonical candidate for the only true duplicate pair: it is earlier, more detailed, and already has inbound references, while issue:44545 is a near-identical follow-up report.", "canonical_pr_reason": null, "confidence": 0.91, "soft_edge_verdicts": [{"accept": false, "left": "issue:43606", "reason": "Different bugs in different components: CPU offload device mismatch for suno/bark-small vs BigBirdTokenizer special-token decode behavior.", "right": "issue:43653"}, {"accept": false, "left": "issue:41720", "reason": "Both involve model parallelism, but the concrete failures and models differ: cudaErrorAssert on Qwen3 auto device mapping vs GPT-OSS MoE TP not working.", "right": "issue:45161"}, {"accept": false, "left": "issue:41720", "reason": "Different models and failure modes; Qwen3 A800 device-map assert is not the same as failing to run 3d_parallel.py.", "right": "issue:42617"}, {"accept": false, "left": "issue:43388", "reason": "Unrelated areas: gather_for_metrics label truncation vs SAM3 PCS text/bounding-box behavior.", "right": "issue:45059"}, {"accept": false, "left": "issue:43540", "reason": "Both are Qwen3-family reports, but one is video-input validation in Qwen3OmniMoe and the other is a from_pretrained weight_only loading error in Qwen3VL; not the same bug.", "right": "issue:43782"}, {"accept": false, "left": "issue:42915", "reason": "Different models and code paths: Qwen3Moe FP8 config failure vs Llava Next segmentation fault on load.", "right": "issue:43322"}, {"accept": false, "left": "issue:41720", "reason": "Too broad a similarity around offloading/parallelism; the concrete bugs differ between auto device mapping assert and quantization offloading behavior.", "right": "issue:43873"}, {"accept": false, "left": "issue:36010", "reason": "No duplicate signal beyond being general runtime 
issues; ImportError for GenerationMixin is unrelated to 3d_parallel.py.", "right": "issue:42617"}, {"accept": true, "left": "issue:44514", "reason": "Near-verbatim titles describing the same crash in Qwen2_5_VLProcessor.apply_chat_template on batched input with padding=False.", "right": "issue:44545"}, {"accept": false, "left": "issue:33357", "reason": "Completely different problems: MacOS bus error with pretrained community CLIP vs progressive generation with inputs_embeds and past_key_values.", "right": "issue:35707"}], "summary": "The cluster is mostly heterogeneous: it contains several unrelated model/runtime bugs across different subsystems. The only clear duplicate pair is the Qwen2_5_VLProcessor batched padding=False crash (issues 44514/44545)."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43606|issue:43653", "issue:41720|issue:45161", "issue:41720|issue:42617", "issue:43388|issue:45059", "issue:43540|issue:43782", "issue:42915|issue:43322", "issue:41720|issue:43873", "issue:36010|issue:42617", "issue:44514|issue:44545", "issue:33357|issue:35707"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6870, "estimated_input_tokens": 3307, "item_count": 18, "node_count": 18, "serialized_chars": 13226, "soft_pair_count": 9}, "cached_at": "2026-04-14T20:24:39Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "34e9c597d835c1259f2f5e9c52bb5128c885bd87ef684dbed0efd0d1584325e8", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:30064", "issue:34634", "issue:41720", "issue:43381", "issue:43606", "issue:43653", "issue:43720", "issue:43746", "issue:43782", "issue:43844", 
"issue:44368", "issue:44451", "issue:44492", "issue:44509", "issue:44991", "issue:45072", "issue:45357", "issue:45381"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "issue:30064", "reason": "Different processors and bugs: image segmentation-map handling vs BarkProcessor voice_preset behavior.", "right": "issue:34634"}, {"accept": false, "left": "issue:41720", "reason": "Different failure paths: Qwen3 auto device mapping CUDA assert vs GraniteSpeech PEFT adapter local checkpoint loading.", "right": "issue:43746"}, {"accept": false, "left": "issue:44492", "reason": "One is a typo in cache strategy text; the other is stale docs for removed pipeline tasks.", "right": "issue:44509"}, {"accept": false, "left": "issue:43653", "reason": "Unrelated components: BigBirdTokenizer special-token registration vs BitNet packed-weight unpacking during accelerate load.", "right": "issue:43720"}, {"accept": false, "left": "issue:44368", "reason": "Both mention Qwen3.5, but one is a training-time warning about tie_word_embeddings and the other is a save_pretrained regression for visual encoder keys.", "right": "issue:45357"}, {"accept": false, "left": "issue:43381", "reason": "Both involve gradients, but one is eval-mode checkpointing misuse and the other is ZeRO-3 training instability; not the same bug.", "right": "issue:43844"}, {"accept": false, "left": "issue:43606", "reason": "Both are mismatch bugs, but they affect different models/code paths and different tensor properties (device offload vs bfloat16 dtype).", "right": "issue:45072"}, {"accept": false, "left": "issue:44451", "reason": "Both are tokenizer-loading failures, but for different models and likely different root causes.", "right": "issue:44991"}, {"accept": false, "left": "issue:43782", "reason": "Different Qwen VL problems: from_pretrained weight_only 
error vs incorrect vision_position_ids for video input.", "right": "issue:45381"}], "summary": "This cluster is not a duplicate set: the issues span unrelated models and failure modes (segmentation maps, Bark voice presets, Qwen device mapping, tokenizer registration, PEFT/local checkpoint loading, docs/typos, etc.). No soft pair looks like the same underlying bug or change, so there is no clear canonical issue to anchor the cluster."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:30064|issue:34634", "issue:41720|issue:43746", "issue:44492|issue:44509", "issue:43653|issue:43720", "issue:44368|issue:45357", "issue:43381|issue:43844", "issue:43606|issue:45072", "issue:44451|issue:44991", "issue:43782|issue:45381"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6940, "estimated_input_tokens": 3342, "item_count": 18, "node_count": 18, "serialized_chars": 13366, "soft_pair_count": 10}, "cached_at": "2026-04-14T20:24:59Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "ed5dae8ab1f6187b9df34b90b09d521729900bbc42d14a0b8a4705afcda8af70", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:34567", "issue:34634", "issue:35707", "issue:41720", "issue:42886", "issue:43421", "issue:43550", "issue:43720", "issue:43819", "issue:43927", "issue:43994", "issue:44062", "issue:44361", "issue:44451", "issue:44671", "issue:44843", "issue:44918", "issue:44936"], "result": {"analyst_result": {"best_issue_reason": "issue:44843 is the best overall issue for triage because it is the most specific and actionable report of the offline tokenizer regression, with a clear reproduction context and probable 
fix location.", "best_pr_reason": null, "canonical_issue_reason": "issue:44843 is the strongest canonical issue: it states the offline failure clearly, names the exact code path (_patch_mistral_regex), and explains the likely root cause (unconditional model_info() call).", "canonical_pr_reason": null, "confidence": 0.77, "soft_edge_verdicts": [{"accept": false, "left": "issue:44918", "reason": "Both involve trainer behavior, but one is about unpacking Qwen3.5 input embeddings in TRL SFT and the other is a generic train/evaluate failure; different bugs.", "right": "issue:44936"}, {"accept": false, "left": "issue:44062", "reason": "Both are tokenizer/loading-related at a high level, but one is an AddedToken constructor error and the other is a model load failure for ScandiBERT; not the same code path.", "right": "issue:44451"}, {"accept": false, "left": "issue:34634", "reason": "Unrelated subsystems: BarkProcessor voice_preset vs Qwen3 auto device mapping cudaErrorAssert.", "right": "issue:41720"}, {"accept": false, "left": "issue:43550", "reason": "Different bugs and components: torch.compile/SDPA on Bamba versus an MLukeTokenizer AttributeError.", "right": "issue:44361"}, {"accept": false, "left": "issue:43550", "reason": "One is a compile-time/model execution issue; the other is config persistence losing custom token IDs. 
Too different to merge.", "right": "issue:43927"}, {"accept": true, "left": "issue:42886", "reason": "Same underlying offline tokenizer-loading regression: both report HF_HUB_OFFLINE/cache failure, and 44843 identifies the concrete culprit in _patch_mistral_regex/model_info().", "right": "issue:44843"}, {"accept": false, "left": "issue:34567", "reason": "TrainerState token-count tracking and progressive generation with inputs_embeds/past_key_values are unrelated bugs.", "right": "issue:35707"}, {"accept": false, "left": "issue:43421", "reason": "Both touch tokenizer metadata, but one is runtime post-processor refresh and the other is save/load loss of custom token IDs causing IndexError; different failures.", "right": "issue:43927"}, {"accept": false, "left": "issue:43720", "reason": "Distinct model-specific issues: BitNet accelerate loading unpacking versus DAC.from_latents mismatch with missing STE.", "right": "issue:43819"}, {"accept": false, "left": "issue:43994", "reason": "Both report incorrect outputs, but on different models and pipelines; no evidence they share the same root cause.", "right": "issue:44671"}], "summary": "One soft match stands out: the two offline-tokenizer reports appear to describe the same HF_HUB_OFFLINE/cache-loading failure. 
The rest are different model-, tokenizer-, or trainer-specific bugs that should remain separate."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44918|issue:44936", "issue:44062|issue:44451", "issue:34634|issue:41720", "issue:43550|issue:44361", "issue:43550|issue:43927", "issue:42886|issue:44843", "issue:34567|issue:35707", "issue:43421|issue:43927", "issue:43720|issue:43819", "issue:43994|issue:44671"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7198, "estimated_input_tokens": 3471, "item_count": 18, "node_count": 18, "serialized_chars": 13883, "soft_pair_count": 11}, "cached_at": "2026-04-14T20:25:25Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "f49199be0777d5dd49a152fdb09a4eda9a1180194493a80973c34039641cef33", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:39692", "issue:43421", "issue:43525", "issue:43531", "issue:43596", "issue:43653", "issue:44077", "issue:44186", "issue:44220", "issue:44589", "issue:44610", "issue:44625", "issue:44898", "issue:44987", "issue:45042", "issue:45072", "issue:45081", "issue:45356"], "result": {"analyst_result": {"best_issue_reason": "issue:45081 is the best overall anchor because it is specific, actionable, and tied to a distinct failure path that could be fixed directly; several other issues are broader or more model-specific.", "best_pr_reason": null, "canonical_issue_reason": "issue:45081 is the strongest single representative here: it describes a concrete, reproducible tokenizer-loading crash with a clearly named failing code path (`_patch_mistral_regex`) and active user impact.", "canonical_pr_reason": null, "confidence": 
0.93, "soft_edge_verdicts": [{"accept": false, "left": "issue:43531", "reason": "Different subsystems and failures: Qwen3-MoE sliding-window behavior vs a Float8 storage lookup error. No shared code path.", "right": "issue:44589"}, {"accept": false, "left": "issue:44987", "reason": "Both involve loading/inference regressions, but one is a model-loading failure for `physical-intelligence/fast` and the other is an image-processor dependency regression requiring torchvision. Not the same bug.", "right": "issue:45042"}, {"accept": false, "left": "issue:44077", "reason": "`post_init` allowance for patchtsmixer is unrelated to Qwen3.5 label propagation between configs.", "right": "issue:44625"}, {"accept": false, "left": "issue:43421", "reason": "Both mention tokenizer special-token handling, but one is a backend post-processor update bug at runtime while the other is a BigBirdTokenizer mask-token registration issue. Different concrete defects.", "right": "issue:43653"}, {"accept": false, "left": "issue:43421", "reason": "Different tokenizer failures: runtime special-token refresh vs LayoutLMv2 NER/batched padding crashes.", "right": "issue:44186"}, {"accept": false, "left": "issue:44186", "reason": "LayoutLMv2 tokenizer padding/truncation crashes and Perceiver interpolation at non-default resolution are unrelated model-specific bugs.", "right": "issue:44898"}, {"accept": false, "left": "issue:45081", "reason": "Both touch Mistral-related tokenizer handling, but one is a crash in `_patch_mistral_regex` and the other is a Kimi-K2.5 regression about codec handling and a misleading warning. 
Related area, not the same bug.", "right": "issue:45356"}, {"accept": false, "left": "issue:39692", "reason": "SigLIP2 docs/quantization example issues are unrelated to OmDet-Turbo processor/model resolution mismatch.", "right": "issue:44610"}, {"accept": false, "left": "issue:44610", "reason": "One is an image processor output-size mismatch; the other is a dtype mismatch in bfloat16 inference. Different failure modes.", "right": "issue:45072"}, {"accept": false, "left": "issue:43596", "reason": "DeepSpeed ZeRO-3/BertModel initialization crash is unrelated to `_torch_extract_fbank_features()`.", "right": "issue:44220"}, {"accept": false, "left": "issue:43525", "reason": "Llama4 `pad_token_id` config error is unrelated to Qwen3-MoE sliding-window behavior.", "right": "issue:43531"}], "summary": "These items are mostly a grab-bag of unrelated bugs across tokenizers, model configs, image processors, and inference/runtime issues. None of the soft pairs look like the same underlying bug strongly enough to merge."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43531|issue:44589", "issue:44987|issue:45042", "issue:44077|issue:44625", "issue:43421|issue:43653", "issue:43421|issue:44186", "issue:44186|issue:44898", "issue:45081|issue:45356", "issue:39692|issue:44610", "issue:44610|issue:45072", "issue:43596|issue:44220", "issue:43525|issue:43531"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6926, "estimated_input_tokens": 3335, "item_count": 17, "node_count": 17, "serialized_chars": 13337, "soft_pair_count": 11}, "cached_at": "2026-04-14T20:25:40Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", 
"prepared_review_unit_hash": "cf37cdcd194c3f0f4af30db87c7cb5e8074ffba286023bfb797a297ce5db40f1", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:39692", "issue:40444", "issue:43540", "issue:43550", "issue:43653", "issue:43716", "issue:43746", "issue:43819", "issue:43873", "issue:44008", "issue:44186", "issue:44220", "issue:44442", "issue:44661", "issue:44843", "issue:45081", "issue:45381"], "result": {"analyst_result": {"best_issue_reason": "issue:43873 is the broadest, still-open report with multiple inbound references and active discussion, so it is the most useful anchor among otherwise unrelated issues.", "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.86, "soft_edge_verdicts": [{"accept": false, "left": "issue:43746", "reason": "Different models and failure modes: GraniteSpeech PEFT local-checkpoint loading vs Qwen2.5-VL video vision_position_ids.", "right": "issue:45381"}, {"accept": false, "left": "issue:43540", "reason": "Different subsystems and bugs: Qwen2.5-VL iterable-dataset multi-image finetuning vs Gemma3n audio_mel_mask name collision.", "right": "issue:44008"}, {"accept": false, "left": "issue:43653", "reason": "Unrelated tokenizer special-token bug vs DAC latent/STE forward mismatch.", "right": "issue:43819"}, {"accept": false, "left": "issue:44220", "reason": "Different problems: fbank feature extraction issue vs add-new-model-like failing inside TOKENIZER_MAPPING_NAMES.", "right": "issue:44661"}, {"accept": false, "left": "issue:44843", "reason": "Same area (_patch_mistral_regex) but distinct concrete failures: offline model_info call vs backend_tokenizer AttributeError.", "right": "issue:45081"}, {"accept": false, "left": "issue:39692", "reason": "Both mention quantization, but one is a SigLIP2 docs example mismatch/failure and the other is a general offloading+quantization behavior issue.", "right": "issue:43873"}, {"accept": false, "left": "issue:43716", "reason": "Different 
multimodal regressions: Mistral-3 image dtype mismatch vs Gemma3n audio tensor attribute collision.", "right": "issue:44008"}, {"accept": false, "left": "issue:43550", "reason": "Different code paths and model families: torch.compile+SDPA in Bamba vs DAC from_latents/STE mismatch.", "right": "issue:43819"}, {"accept": false, "left": "issue:40444", "reason": "Different models and bugs: Qwen2.5-VL multi-image iterable dataset finetuning vs GraniteSpeech adapter loading from local checkpoints.", "right": "issue:43746"}, {"accept": false, "left": "issue:44186", "reason": "Both tokenizer-related, but the failures are unrelated: LayoutLMv2 NER/padding crashes vs AutoTokenizer loading FastSpeech2ConformerTokenizer.", "right": "issue:44442"}, {"accept": false, "left": "issue:43746", "reason": "Shared theme of model loading/quantization only; the concrete bugs are unrelated (PEFT local checkpoints vs offloading with quantization).", "right": "issue:43873"}], "summary": "This set does not look like one duplicate cluster; it is a collection of unrelated bug reports across tokenizers, multimodal models, quantization/offloading, and preprocessing. 
None of the soft pairs appear to be the same underlying bug or change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43746|issue:45381", "issue:43540|issue:44008", "issue:43653|issue:43819", "issue:44220|issue:44661", "issue:44843|issue:45081", "issue:39692|issue:43873", "issue:43716|issue:44008", "issue:43550|issue:43819", "issue:40444|issue:43746", "issue:44186|issue:44442", "issue:43746|issue:43873"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6594, "estimated_input_tokens": 3169, "item_count": 17, "node_count": 17, "serialized_chars": 12676, "soft_pair_count": 9}, "cached_at": "2026-04-14T20:26:20Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "8a9370021c36b95c0be5cad88900b6235d7b4774351a64b729d298a3006ab112", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:30064", "issue:36010", "issue:43335", "issue:43421", "issue:43540", "issue:43550", "issue:43577", "issue:43618", "issue:43697", "issue:43720", "issue:43756", "issue:43818", "issue:44062", "issue:44488", "issue:44811", "issue:45072", "issue:45381"], "result": {"analyst_result": {"best_issue_reason": "issue:43618 is the most self-contained and clearly stated, but it is not representative of the rest of the cluster.", "best_pr_reason": null, "canonical_issue_reason": "No single canonical issue fits: the items describe distinct bugs in different subsystems and model families.", "canonical_pr_reason": null, "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "issue:43618", "reason": "Different regressions: CLIPOutput attentions field vs RTDetrV2 output changes in v5.", "right": "issue:43697"}, 
{"accept": false, "left": "issue:43335", "reason": "Unrelated bugs: SwitchTransformers sparse-layer config vs Smollm3 RoPE layer loss.", "right": "issue:43756"}, {"accept": false, "left": "issue:44062", "reason": "Both tokenization-related, but one is an AddedToken constructor error and the other is a model-loading failure; not the same bug.", "right": "issue:44488"}, {"accept": false, "left": "issue:43720", "reason": "Different code paths: BitNet packed-weight loading vs dtype mismatches in SwitchTransformers/TimmWrapperModel.", "right": "issue:45072"}, {"accept": false, "left": "issue:43550", "reason": "Both mention runtime/inference issues, but Bamba torch.compile SDPA failure is unrelated to dtype mismatch bugs.", "right": "issue:45072"}, {"accept": false, "left": "issue:30064", "reason": "Completely different domains: image segmentation map processing vs import error for GenerationMixin.", "right": "issue:36010"}, {"accept": false, "left": "issue:43421", "reason": "Both involve tokenizer/processor behavior, but one is runtime special-token post-processing and the other is Whisper batch_decode skip_special_tokens handling.", "right": "issue:44811"}, {"accept": false, "left": "issue:43540", "reason": "Both are video-input bugs, but Qwen3OmniMoe video processing error and qwen2.5-vl vision_position_ids mismatch are distinct failures.", "right": "issue:45381"}, {"accept": false, "left": "issue:43577", "reason": "Different multimodal dtype/initialization bugs: Blip2 dtype propagation vs Video-LLaVA missing temporal attention and near-duplicate towers.", "right": "issue:43818"}], "summary": "This cluster is a false-positive mix of unrelated transformer bugs across image/video processing, tokenization, dtype/config handling, and model-specific regressions. 
The soft pairs are mostly lexical matches, not the same underlying defect."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43618|issue:43697", "issue:43335|issue:43756", "issue:44062|issue:44488", "issue:43720|issue:45072", "issue:43550|issue:45072", "issue:30064|issue:36010", "issue:43421|issue:44811", "issue:43540|issue:45381", "issue:43577|issue:43818"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6554, "estimated_input_tokens": 3149, "item_count": 17, "node_count": 17, "serialized_chars": 12593, "soft_pair_count": 9}, "cached_at": "2026-04-14T20:26:40Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "ecf218ba833bfa178478c81c357d39c1cf7a3ec59d04dd3ac3f1e964ecde21a4", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:43531", "issue:43577", "issue:43582", "issue:43643", "issue:43644", "issue:43749", "issue:43782", "issue:43825", "issue:43994", "issue:44448", "issue:44561", "issue:44671", "issue:44877", "issue:44977", "issue:45020", "issue:45081", "issue:45406"], "result": {"analyst_result": {"best_issue_reason": "issue #45020 is the most representative and general issue in this group because it describes a broad class of recent v5 remote_code regressions rather than a single model-specific symptom.", "best_pr_reason": null, "canonical_issue_reason": "issue #45020 is the broadest/highest-level report in the set and the closest thing to a cluster umbrella, but the items here are not true duplicates of one another.", "canonical_pr_reason": null, "confidence": 0.87, "soft_edge_verdicts": [{"accept": false, "left": "issue:44561", "reason": "Both mention 
remote_code/trust_remote_code regressions, but one is specifically about removing is_torch_fx_available and the other is a broader set of recent-version breakages; not clearly the same underlying bug.", "right": "issue:45020"}, {"accept": false, "left": "issue:45081", "reason": "Different code paths and failures: Mistral tokenizer regex patching versus Gemma4Processor missing _tokenizer in serve.", "right": "issue:45406"}, {"accept": false, "left": "issue:43643", "reason": "Both involve config-loading behavior, but one is missing fields from AutoConfig with trust_remote_code and the other is strict config blocking granite_speech; different bugs.", "right": "issue:44877"}, {"accept": false, "left": "issue:44448", "reason": "Different models and symptoms: Pegasus output changes versus CamemBERT masked-LM predictions in v5.", "right": "issue:44671"}, {"accept": false, "left": "issue:43782", "reason": "Both are Qwen-related, but one is a from_pretrained weight_only error for Qwen3VL and the other is flash-attention generation issues for Qwen3.5; not the same concrete failure.", "right": "issue:44977"}, {"accept": false, "left": "issue:43531", "reason": "No meaningful overlap: sliding_window behavior in Qwen3-MoE versus AutoConfig/trust_remote_code missing fields.", "right": "issue:43643"}, {"accept": false, "left": "issue:43577", "reason": "Completely different areas: Blip2 dtype loading versus Apple Silicon caching_allocator_warmup TypeError.", "right": "issue:43582"}, {"accept": false, "left": "issue:43644", "reason": "Different regressions: non-persistent buffers being filled with junk versus FSDP CPU RAM efficient loading.", "right": "issue:43749"}, {"accept": false, "left": "issue:43825", "reason": "Different problems: an incorrect pipeline error message versus nonsensical SigLIP2 model/pipeline outputs.", "right": "issue:43994"}], "summary": "This cluster is mostly a set of unrelated issue reports that only share broad Transformers/v5 or model-family wording. 
I do not see any true duplicate pairs among the soft candidates; the closest semantic overlap is the remote_code regression area, but even that looks like different failures."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44561|issue:45020", "issue:45081|issue:45406", "issue:43643|issue:44877", "issue:44448|issue:44671", "issue:43782|issue:44977", "issue:43531|issue:43643", "issue:43577|issue:43582", "issue:43644|issue:43749", "issue:43825|issue:43994"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6862, "estimated_input_tokens": 3303, "item_count": 18, "node_count": 18, "serialized_chars": 13209, "soft_pair_count": 9}, "cached_at": "2026-04-14T20:27:02Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "e5347b2823a40db6917e61e61b1cc3f265b6897249e52b89c6ee9e1baaf18e84", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:30064", "issue:36331", "issue:41720", "issue:42175", "issue:43650", "issue:43720", "issue:43782", "issue:43819", "issue:43825", "issue:43827", "issue:43906", "issue:44361", "issue:44610", "issue:44805", "issue:44936", "issue:44991", "issue:45081", "issue:45325"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.93, "soft_edge_verdicts": [{"accept": false, "left": "issue:43819", "reason": "Different subsystems and failure modes: DAC latent/STE mismatch vs MLukeTokenizer task AttributeError.", "right": "issue:44361"}, {"accept": false, "left": "issue:44805", "reason": "Generic tensor mask shape mismatch is not the same as trainer.evaluate() failing after 
train(); too different a code path.", "right": "issue:44936"}, {"accept": false, "left": "issue:43650", "reason": "No shared bug: one is a vague/placeholder issue, the other is an isolated reproduction of a specific upstream report.", "right": "issue:43906"}, {"accept": false, "left": "issue:41720", "reason": "Unrelated problems: Qwen3 auto device mapping CUDA assert vs tokenizer loading for a specific RoBERTa model.", "right": "issue:44991"}, {"accept": false, "left": "issue:43720", "reason": "Different concrete failures: BitNet packed-weight loading under accelerate vs OmDet-Turbo processor image-size mismatch.", "right": "issue:44610"}, {"accept": false, "left": "issue:42175", "reason": "Packaging/backend install issue is unrelated to a tokenizer regex patch crash during Mistral loading.", "right": "issue:45081"}, {"accept": false, "left": "issue:43825", "reason": "Both concern v5 pipeline removals, but one is a runtime error message and the other is stale docs; separate artifacts and fixes.", "right": "issue:43827"}, {"accept": false, "left": "issue:30064", "reason": "Image processor segmentation-map handling and CustomTrainer compute_loss kwarg regression are unrelated.", "right": "issue:36331"}, {"accept": false, "left": "issue:43782", "reason": "Different Qwen VL models and different bugs: weight_only loading error vs rope-index position scaling issue.", "right": "issue:45325"}], "summary": "This cluster is highly heterogeneous: the paired items are mostly unrelated bugs in different subsystems/models. 
I rejected all soft edges; the only loosely related pair is the v5 pipeline-removal docs/runtime-message duo, but they still look like separate fixes rather than the same underlying issue."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43819|issue:44361", "issue:44805|issue:44936", "issue:43650|issue:43906", "issue:41720|issue:44991", "issue:43720|issue:44610", "issue:42175|issue:45081", "issue:43825|issue:43827", "issue:30064|issue:36331", "issue:43782|issue:45325"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6616, "estimated_input_tokens": 3180, "item_count": 17, "node_count": 17, "serialized_chars": 12719, "soft_pair_count": 9}, "cached_at": "2026-04-14T20:27:20Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "f14d457dda99b4252fe2970b5e19b70d241787841355e6c43bba946d383ee0df", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:34634", "issue:36010", "issue:43262", "issue:43526", "issue:43582", "issue:43653", "issue:43697", "issue:43749", "issue:43819", "issue:44060", "issue:44291", "issue:44368", "issue:44485", "issue:44509", "issue:44554", "issue:44671", "issue:44857"], "result": {"analyst_result": {"best_issue_reason": "No single issue is a good representative; the cluster should be split because the reports cover distinct subsystems and failure modes.", "best_pr_reason": null, "canonical_issue_reason": "No canonical issue: the items are not duplicates and do not share a single underlying bug or change.", "canonical_pr_reason": null, "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "issue:44485", "reason": "Different topics: GLM-5 RoPE 
implementation vs removed docs pipeline tasks. No shared bug or code path.", "right": "issue:44509"}, {"accept": false, "left": "issue:34634", "reason": "BarkProcessor voice_preset failure and GenerationMixin ImportError are unrelated errors in different areas.", "right": "issue:36010"}, {"accept": false, "left": "issue:44060", "reason": "Both mention tie-weight warnings, but one is a Qwen3-Next incorrect parameter tying bug and the other is a generic LoRA config warning.", "right": "issue:44368"}, {"accept": false, "left": "issue:43582", "reason": "Apple Silicon caching allocator TypeError and CUDA AMP image-loss crash are unrelated platform-specific issues.", "right": "issue:44857"}, {"accept": false, "left": "issue:43749", "reason": "Both involve model loading, but one is FSDP CPU RAM efficient loading and the other is an init_empty_weights signature mismatch; not the same bug.", "right": "issue:44291"}, {"accept": false, "left": "issue:43526", "reason": "BeitImageProcessorFast reduce_labels bug and RTDetrV2 output mismatch are different models and different failure modes.", "right": "issue:43697"}, {"accept": false, "left": "issue:43653", "reason": "Tokenizer special-token registration bug and CamemBERT masked-LM prediction bug are not the same underlying issue.", "right": "issue:44671"}, {"accept": false, "left": "issue:43526", "reason": "A vision processor label-handling bug is unrelated to an MPS attention correctness issue.", "right": "issue:44554"}, {"accept": false, "left": "issue:43262", "reason": "Audio apply_chat_template sampling-rate default mismatch and DAC.from_latents forward mismatch are distinct audio bugs with different code paths.", "right": "issue:43819"}], "summary": "This cluster is heterogeneous: the issues span unrelated bugs in audio processors, tokenizers, model loading, vision processors, attention behavior, docs, and warnings. 
I do not see a true duplicate set or a meaningful canonical issue to merge around."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44485|issue:44509", "issue:34634|issue:36010", "issue:44060|issue:44368", "issue:43582|issue:44857", "issue:43749|issue:44291", "issue:43526|issue:43697", "issue:43653|issue:44671", "issue:43526|issue:44554", "issue:43262|issue:43819"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6892, "estimated_input_tokens": 3318, "item_count": 18, "node_count": 18, "serialized_chars": 13271, "soft_pair_count": 9}, "cached_at": "2026-04-14T20:32:33Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "0c59736d18e62e2a9ea4654b67e7c4096f8d2eb3393b815c01864445215deb4f", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:34634", "issue:36331", "issue:43257", "issue:43329", "issue:43452", "issue:43454", "issue:43577", "issue:43638", "issue:43653", "issue:43746", "issue:43854", "issue:44589", "issue:44811", "issue:44912", "issue:44936", "issue:44977", "issue:44991", "issue:45042"], "result": {"analyst_result": {"best_issue_reason": "Issue 44977 is the most self-contained and concrete (specific model, specific failure mode, clear symptom), so it is the best representative issue if one must be chosen.", "best_pr_reason": null, "canonical_issue_reason": "No clear canonical duplicate exists in this set because the items are unrelated issues rather than one bug reported multiple times. 
If forced to pick a representative report, issue 44977 is the clearest standalone bug report.", "canonical_pr_reason": null, "confidence": 0.93, "soft_edge_verdicts": [{"accept": false, "left": "issue:43452", "reason": "Both are loading/tokenizer problems, but they describe different failure modes: gguf_file handling for AutoTokenizer/AutoModel versus a transformers 5.0 tokenizer regression for a specific model.", "right": "issue:44991"}, {"accept": false, "left": "issue:43638", "reason": "Unrelated bugs: one is a Zero3/non-pretrained Bert IndexError during training, the other is evaluate() failing after train().", "right": "issue:44936"}, {"accept": false, "left": "issue:43454", "reason": "Different models and different root causes: lm_head weight tying in AyaVision versus dtype propagation for Blip2/Qformer.", "right": "issue:43577"}, {"accept": false, "left": "issue:43746", "reason": "These affect different code paths and symptoms: GraniteSpeech local checkpoint loading with PEFT versus Whisper processor batch_decode skip_special_tokens handling.", "right": "issue:44811"}, {"accept": false, "left": "issue:34634", "reason": "No shared underlying bug: BarkProcessor voice_preset is unrelated to a CustomTrainer.compute_loss signature mismatch.", "right": "issue:36331"}, {"accept": false, "left": "issue:43653", "reason": "Tokenizer special-token registration causing empty decode is not the same as MXFP4 quantization falling back to bf16.", "right": "issue:44912"}, {"accept": false, "left": "issue:44589", "reason": "A float8 storage lookup error is unrelated to PIL image processors unnecessarily requiring torchvision.", "right": "issue:45042"}, {"accept": false, "left": "issue:43854", "reason": "Different models and paths: GLM-4.7-Flash unit-test loading versus Qwen3.5 flash-attention generation behavior.", "right": "issue:44977"}, {"accept": false, "left": "issue:43257", "reason": "These are distinct: Qwen3 MOE weight conversion under accelerate/deepspeed versus a 
multimodal token-count helper using undefined video-branch symbols.", "right": "issue:43329"}], "summary": "No soft pair looks like the same underlying bug/change; the issues span unrelated models, trainer APIs, tokenizers, quantization, and processor logic. I would not merge any of the suggested pairs."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43452|issue:44991", "issue:43638|issue:44936", "issue:43454|issue:43577", "issue:43746|issue:44811", "issue:34634|issue:36331", "issue:43653|issue:44912", "issue:44589|issue:45042", "issue:43854|issue:44977", "issue:43257|issue:43329"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6826, "estimated_input_tokens": 3285, "item_count": 18, "node_count": 18, "serialized_chars": 13138, "soft_pair_count": 9}, "cached_at": "2026-04-14T20:32:43Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "233917ac4d22f3abd9b467179cfd692ac540269b2f97cfcdaafbb6e2122b8367", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:36010", "issue:38175", "issue:39692", "issue:42175", "issue:43257", "issue:43262", "issue:43531", "issue:43645", "issue:43696", "issue:43749", "issue:44568", "issue:44610", "issue:44756", "issue:44811", "issue:44912", "issue:44938", "issue:45042", "issue:45356"], "result": {"analyst_result": {"best_issue_reason": "issue #43749 is the strongest representative only in the sense of triage signal: it has the most discussion and inbound references, and it describes a clear, core loading regression. 
It is not a duplicate hub for the rest of the cluster.", "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "issue:44568", "reason": "Both are tokenizer-related, but they describe different regressions: missing BOS/EOS insertion for mDeBERTa vs codec handling and warning behavior for Kimi-K2.5.", "right": "issue:45356"}, {"accept": false, "left": "issue:36010", "reason": "Different problems: a specific GenerationMixin import error vs a general Transformers 5.0 notebook custom-model initialization regression.", "right": "issue:43645"}, {"accept": false, "left": "issue:38175", "reason": "Different model behaviors and code paths: SigLIP2 zero probabilities vs OmDet-Turbo processor output-size mismatch.", "right": "issue:44610"}, {"accept": false, "left": "issue:43257", "reason": "Qwen3 MoE weight conversion under accelerate/deepspeed is unrelated to GPT-oss-20b CUDA OOM on load.", "right": "issue:43696"}, {"accept": false, "left": "issue:39692", "reason": "One is a SigLIP2 documentation/example error; the other is an MXFP4 quantization fallback issue for git-oss-20b. 
Same general theme, but not the same bug.", "right": "issue:44912"}, {"accept": false, "left": "issue:42175", "reason": "Backend packaging/install dependency issue vs Python 3.14 loading failure; different failure modes and likely different root causes.", "right": "issue:44938"}, {"accept": false, "left": "issue:43262", "reason": "Both involve audio processors, but one is a chat-template sampling-rate default bug and the other is batch_decode ignoring skip_special_tokens.", "right": "issue:44811"}, {"accept": false, "left": "issue:43749", "reason": "Both mention memory/loading pain points, but one is broken FSDP CPU RAM efficient loading and the other is Strix Halo mmap-induced OOM; not the same underlying defect.", "right": "issue:44756"}, {"accept": false, "left": "issue:43531", "reason": "Qwen3-MoE sliding_window behavior is unrelated to PIL backend processors incorrectly requiring torchvision.", "right": "issue:45042"}], "summary": "These issues are broadly heterogeneous and do not look like duplicate reports of one underlying bug. 
They span unrelated areas such as tokenizers, processors, loading, quantization, environment/backend packaging, and model-specific failures; all soft-edge pairs should be rejected."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44568|issue:45356", "issue:36010|issue:43645", "issue:38175|issue:44610", "issue:43257|issue:43696", "issue:39692|issue:44912", "issue:42175|issue:44938", "issue:43262|issue:44811", "issue:43749|issue:44756", "issue:43531|issue:45042"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6584, "estimated_input_tokens": 3164, "item_count": 17, "node_count": 17, "serialized_chars": 12656, "soft_pair_count": 9}, "cached_at": "2026-04-14T20:32:56Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "8b6867095de75343e97cf8510b71d7768b6cdef9fd055557837d9afaa3ff63c6", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:36010", "issue:36331", "issue:41720", "issue:43257", "issue:43329", "issue:43452", "issue:43540", "issue:43550", "issue:43582", "issue:43697", "issue:43756", "issue:43931", "issue:44361", "issue:44442", "issue:44479", "issue:44912", "issue:45357"], "result": {"analyst_result": {"best_issue_reason": "Issue 44479 is the strongest standalone representative: it is explicit about the regression scope, affected model families, and version, making it the most useful anchor for deduplication.", "best_pr_reason": null, "canonical_issue_reason": "Among the accepted duplicate pair, issue 44479 is the better canonical because it cleanly describes the broader v5.3.0 video-input regression across multiple Qwen VL/Omni variants, while 43329 is a narrower root-cause 
report for the same failure area.", "canonical_pr_reason": null, "confidence": 0.74, "soft_edge_verdicts": [{"accept": true, "left": "issue:43329", "reason": "Both describe the same video-input failure path in Qwen multimodal models. 43329 identifies the likely root cause in the video branch, and 44479 reports the broader regression across the same video-capable Qwen variants.", "right": "issue:44479"}, {"accept": false, "left": "issue:43257", "reason": "Different bugs: one is Qwen3 MoE weight-conversion/loading with accelerate+deepspeed, the other is a Smollm3 RoPE-layer-count issue.", "right": "issue:43756"}, {"accept": false, "left": "issue:43550", "reason": "Different failure modes and subsystems: torch.compile/SDPA on Bamba versus MXFP4 quantization fallback on git-oss-20b.", "right": "issue:44912"}, {"accept": false, "left": "issue:36010", "reason": "Unrelated symptoms: an import path break for GenerationMixin versus a Trainer subclass signature mismatch.", "right": "issue:36331"}, {"accept": false, "left": "issue:43697", "reason": "One is an inference-output consistency regression in RTDetrV2; the other is an incorrect save_pretrained key regression for Qwen3.5 visual encoder weights.", "right": "issue:45357"}, {"accept": false, "left": "issue:43257", "reason": "Both involve Qwen models, but one is a deepspeed/accelerate conversion problem and the other is a model weight-shape mismatch during loading; not the same bug.", "right": "issue:43931"}, {"accept": false, "left": "issue:41720", "reason": "Different models and code paths: auto device mapping with cudaErrorAssert on Qwen3 versus video-input validation errors in Qwen3OmniMoe.", "right": "issue:43540"}, {"accept": false, "left": "issue:43452", "reason": "Tokenizer/model-loading issues for gguf files are unrelated to an Apple Silicon allocator warmup TypeError.", "right": "issue:43582"}, {"accept": false, "left": "issue:44361", "reason": "These are separate tokenizer-loading failures for different 
tokenizers and different underlying causes.", "right": "issue:44442"}], "summary": "One likely duplicate pair stands out around the Qwen video-input regression; the rest are clearly separate bugs affecting different code paths, models, or failure modes."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43329|issue:44479", "issue:43257|issue:43756", "issue:43550|issue:44912", "issue:36010|issue:36331", "issue:43697|issue:45357", "issue:43257|issue:43931", "issue:41720|issue:43540", "issue:43452|issue:43582", "issue:44361|issue:44442"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7168, "estimated_input_tokens": 3456, "item_count": 18, "node_count": 18, "serialized_chars": 13822, "soft_pair_count": 11}, "cached_at": "2026-04-14T20:37:14Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "6dfb1a28c5762b129dd0f2a2b069be44564ece61ec3e7953fc984da01ee1a788", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:42175", "issue:42915", "issue:43329", "issue:43450", "issue:43540", "issue:43577", "issue:43824", "issue:43906", "issue:44062", "issue:44291", "issue:44479", "issue:44488", "issue:44560", "issue:44821", "issue:44991", "issue:45042", "issue:45072", "issue:45381"], "result": {"analyst_result": {"best_issue_reason": "issue:44479 is the best global issue candidate for consolidation: it is the broadest, most central report among the only plausible duplicate pocket and is the most likely umbrella tracker.", "best_pr_reason": null, "canonical_issue_reason": "issue:44479 is the strongest canonical issue because it describes the broadest concrete regression surface (multiple Qwen VL 
variants on 5.3.0) and likely subsumes the narrower qwen2.5-vl video-position-id report.", "canonical_pr_reason": null, "confidence": 0.74, "soft_edge_verdicts": [{"accept": false, "left": "issue:44062", "reason": "Different failure modes: AddedToken/special-kwarg constructor error vs tokenizer loading failure for a specific model. Same tokenizer area, but not the same underlying bug.", "right": "issue:44991"}, {"accept": false, "left": "issue:43329", "reason": "Both involve video processors, but one is an undefined-name bug in multimodal token counting and the other is an incorrect batched output shape. Distinct concrete problems.", "right": "issue:43450"}, {"accept": true, "left": "issue:44479", "reason": "Both report the same Qwen video-input regression on transformers 5.3.0. The narrower 'vision_position_ids seems to be wrong' symptom plausibly describes the same root cause as the broader video regression report.", "right": "issue:45381"}, {"accept": false, "left": "issue:42175", "reason": "Unrelated issues: one is about an optional backend missing from a pip extra, the other is an ImportError for a specific model class. No shared bug.", "right": "issue:43824"}, {"accept": false, "left": "issue:43540", "reason": "Different models and exceptions: Qwen3OmniMoe video ValueError vs Qwen3-vl-embedding StopIteration. Too different to be the same bug.", "right": "issue:44560"}, {"accept": false, "left": "issue:44291", "reason": "One is an init_empty_weights loading argument incompatibility; the other is an image processor backend dependency issue. Different code paths and fixes.", "right": "issue:45042"}, {"accept": false, "left": "issue:43577", "reason": "Both mention dtype, but one is a Blip2 loading-state problem and the other is a CI inference mismatch for different model classes. Not the same concrete defect.", "right": "issue:45072"}, {"accept": false, "left": "issue:42175", "reason": "Backend packaging/install issue vs a model load failure for cjvt/sleng-bert. 
No evidence of a shared root cause.", "right": "issue:44488"}, {"accept": false, "left": "issue:42915", "reason": "Qwen3Moe FP8 config failure and AutoImageProcessor URL loading are unrelated bugs in different parts of the stack.", "right": "issue:44821"}, {"accept": false, "left": "issue:43906", "reason": "An isolated reproduction of another external issue is not the same as the Qwen video-position-id regression. Different subject and likely different fix.", "right": "issue:45381"}, {"accept": false, "left": "issue:43540", "reason": "Both are video-related Qwen issues, but they target different models and error types. They may share a subsystem, but not enough to treat as the same underlying bug.", "right": "issue:44479"}], "summary": "This set is mostly heterogeneous issue reports. Only the Qwen video-regression pair looks plausibly duplicate; the other soft pairs are related by broad subsystem words but describe different concrete bugs."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44062|issue:44991", "issue:43329|issue:43450", "issue:44479|issue:45381", "issue:42175|issue:43824", "issue:43540|issue:44560", "issue:44291|issue:45042", "issue:43577|issue:45072", "issue:42175|issue:44488", "issue:42915|issue:44821", "issue:43906|issue:45381", "issue:43540|issue:44479"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6510, "estimated_input_tokens": 3127, "item_count": 17, "node_count": 17, "serialized_chars": 12506, "soft_pair_count": 9}, "cached_at": "2026-04-14T20:37:33Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "e948a0ab90cde8383d9b3d0bb714a5b5b535b20370fbc74b3eaa97eb9be15574", 
"prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:33357", "issue:34567", "issue:36010", "issue:38175", "issue:42175", "issue:42915", "issue:43582", "issue:43720", "issue:43742", "issue:43756", "issue:43782", "issue:44484", "issue:44704", "issue:44898", "issue:45137", "issue:45325", "issue:45405"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "issue:42915", "reason": "Both are loading-related, but one is a Qwen3Moe FineGrainedFP8Config failure and the other is a MobileLLM key error; different models and underlying bugs.", "right": "issue:43742"}, {"accept": false, "left": "issue:33357", "reason": "MacOS bus error on CLIP model loading is unrelated to TrainerState num_input_tokens_seen not updating.", "right": "issue:34567"}, {"accept": false, "left": "issue:43720", "reason": "BitNet packed-weight/accelerate loading failure and Perceiver interpolation failure affect different code paths and models.", "right": "issue:44898"}, {"accept": false, "left": "issue:45137", "reason": "DeepSpeed ZeRO3 deque IndexError is unrelated to a PEFT version-bump packaging issue.", "right": "issue:45405"}, {"accept": false, "left": "issue:36010", "reason": "GenerationMixin import failure and missing TensorFlow in the torch extra are different packaging/import problems.", "right": "issue:42175"}, {"accept": false, "left": "issue:43756", "reason": "Both mention RoPE, but Smollm3 layer-selection behavior and Qwen2.5-VL temporal position scaling are distinct bugs.", "right": "issue:45325"}, {"accept": false, "left": "issue:44484", "reason": "save_pretrained shard-size defaults and AutoProcessor kwarg forwarding are separate API issues.", "right": "issue:44704"}, {"accept": false, "left": "issue:43582", "reason": "Apple Silicon caching_allocator_warmup TypeError is unrelated to Qwen3VL weight_only loading errors.", 
"right": "issue:43782"}, {"accept": false, "left": "issue:38175", "reason": "SigLIP2 zero-probability output and Perceiver positional interpolation failure are different model-specific issues.", "right": "issue:44898"}], "summary": "No duplicate-worthy cluster here: the soft-similarity pairs are only superficially related (same broad model/loading/vision language) but describe different models, code paths, and failure modes."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:42915|issue:43742", "issue:33357|issue:34567", "issue:43720|issue:44898", "issue:45137|issue:45405", "issue:36010|issue:42175", "issue:43756|issue:45325", "issue:44484|issue:44704", "issue:43582|issue:43782", "issue:38175|issue:44898"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7038, "estimated_input_tokens": 3391, "item_count": 18, "node_count": 18, "serialized_chars": 13564, "soft_pair_count": 10}, "cached_at": "2026-04-14T20:37:40Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "3a2754d562aac05c80a6d7e9a5c0b8cb1be567f6b12fe3ef4dce53f1861e7217", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:28282", "issue:29127", "issue:34567", "issue:34689", "issue:36331", "issue:43452", "issue:43643", "issue:43646", "issue:43653", "issue:43742", "issue:43749", "issue:44062", "issue:44291", "issue:44361", "issue:44488", "issue:44821", "issue:45081", "issue:45375"], "result": {"analyst_result": {"best_issue_reason": "Issue 43646 is the broadest user-facing regression report in the set, but the cluster is too heterogeneous for a strong canonical issue choice.", "best_pr_reason": null, "canonical_issue_reason": 
"No single issue cleanly represents the whole set; the reports cover distinct code paths and regressions. If forced to pick the broadest, issue 43646 is the most general, but it is not a duplicate of the others.", "canonical_pr_reason": null, "confidence": 0.94, "soft_edge_verdicts": [{"accept": false, "left": "issue:43643", "reason": "Both mention missing fields in config objects, but they affect different configs and different mechanisms (`trust_remote_code` vs `@strict` dropping a field). Not the same bug.", "right": "issue:45375"}, {"accept": false, "left": "issue:44062", "reason": "Both are tokenizer-related TypeErrors, but one is about `AddedToken(special=...)` and the other is a Mistral regex patch accessing `backend_tokenizer`. Different failure sites and causes.", "right": "issue:45081"}, {"accept": false, "left": "issue:43646", "reason": "Different regressions: custom model initialization breakage versus FSDP CPU RAM efficient loading. Same broad release scope, not the same underlying code path.", "right": "issue:43749"}, {"accept": false, "left": "issue:43742", "reason": "Both involve loading/model init issues, but one is a key error for MobileLLM and the other is an `init_empty_weights`/`_is_hf_initialized` argument mismatch. Not duplicates.", "right": "issue:44291"}, {"accept": false, "left": "issue:28282", "reason": "Both are model-loading related, but one is missing PyTorch dependency detection and the other is a Llama 3.2 vision loading regression. Different environments and root causes.", "right": "issue:34689"}, {"accept": false, "left": "issue:43452", "reason": "Both mention tokenizer/model loading, but one is about `gguf_file` handling and the other is a specific tokenizer load failure for `cjvt/sleng-bert`. 
Not the same defect.", "right": "issue:44488"}, {"accept": false, "left": "issue:44291", "reason": "`init_empty_weights` initialization error is unrelated to inability to load `AutoImageProcessor` from a URL; different API surfaces and code paths.", "right": "issue:44821"}, {"accept": false, "left": "issue:43653", "reason": "Both are tokenizer-specific bugs, but one is about special-token registration for BigBirdTokenizer and the other is an AttributeError in MLukeTokenizer tasks. Different tokenizers and behaviors.", "right": "issue:44361"}, {"accept": false, "left": "issue:28282", "reason": "PyTorch missing-library ImportError and LayoutLMv3 error messaging for insufficient box info are unrelated issues; only the generic error-reporting theme overlaps.", "right": "issue:29127"}, {"accept": false, "left": "issue:34567", "reason": "TrainerState token counter not updating and `compute_loss()` signature mismatch are distinct trainer API issues with different symptoms and fixes.", "right": "issue:36331"}], "summary": "This cluster is mostly a set of unrelated Transformers bug reports spanning tokenizer loading, config parsing, trainer API changes, and model-specific failures. 
The soft-similarity pairs share only surface-level patterns (loading errors, missing fields, TypeErrors) but not the same underlying defect."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43643|issue:45375", "issue:44062|issue:45081", "issue:43646|issue:43749", "issue:43742|issue:44291", "issue:28282|issue:34689", "issue:43452|issue:44488", "issue:44291|issue:44821", "issue:43653|issue:44361", "issue:28282|issue:29127", "issue:34567|issue:36331"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6530, "estimated_input_tokens": 3137, "item_count": 17, "node_count": 17, "serialized_chars": 12547, "soft_pair_count": 9}, "cached_at": "2026-04-14T20:37:53Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "6af5275d7d69869aad1dbaa8029f0e4e388ed569307fcf3fdb9339e97cbac0b4", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:43531", "issue:43643", "issue:43653", "issue:43688", "issue:43742", "issue:43749", "issue:43927", "issue:44514", "issue:44561", "issue:44589", "issue:44792", "issue:44821", "issue:44964", "issue:45042", "issue:45081", "issue:45290", "issue:45381"], "result": {"analyst_result": {"best_issue_reason": "No single issue is a strong canonical representative: the reports span distinct subsystems and failure modes (loading, generation, chat templating, tokenizer/config handling, multimodal processing).", "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.95, "soft_edge_verdicts": [{"accept": false, "left": "issue:43688", "reason": "Different problems: auxiliary-loss normalization in MoE models vs broken FSDP CPU 
RAM-efficient loading. Same broad training area, but not the same bug.", "right": "issue:43749"}, {"accept": false, "left": "issue:44792", "reason": "Both touch multimodal image models, but one is a failing janus image-generation test and the other is a Phi-4 multimodal load failure. Different code paths and symptoms.", "right": "issue:44964"}, {"accept": false, "left": "issue:43643", "reason": "Both involve trust_remote_code, but one is missing fields from AutoConfig.from_pretrained and the other is a v5.0 break caused by removing is_torch_fx_available. Not the same defect.", "right": "issue:44561"}, {"accept": false, "left": "issue:44514", "reason": "Both mention apply_chat_template, but one crashes on batched padded input and the other on assistant tool-call messages with empty content. Separate edge cases.", "right": "issue:45290"}, {"accept": false, "left": "issue:43531", "reason": "Qwen3-MoE sliding_window behavior vs loading AutoImageProcessor from URL are unrelated.", "right": "issue:44821"}, {"accept": false, "left": "issue:43742", "reason": "KeyError while loading MobileLLM-125M vs Float8 storage deserialization failure are unrelated loading/runtime bugs.", "right": "issue:44589"}, {"accept": false, "left": "issue:43653", "reason": "Tokenizer special-token registration causing empty decode vs DiaConfig losing custom token IDs after save/load. Both involve token metadata, but different components and failures.", "right": "issue:43927"}, {"accept": false, "left": "issue:44514", "reason": "apply_chat_template padding crash vs Qwen2.5-VL video vision_position_ids bug are different multimodal issues.", "right": "issue:45381"}, {"accept": false, "left": "issue:45042", "reason": "PIL backend incorrectly requiring torchvision vs Mistral regex patch AttributeError are unrelated.", "right": "issue:45081"}], "summary": "This is a heterogeneous cluster of unrelated bug reports across different models, tokenizers, processors, and loading paths. 
None of the soft pairs look like the same underlying issue, so I would not merge any of them."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43688|issue:43749", "issue:44792|issue:44964", "issue:43643|issue:44561", "issue:44514|issue:45290", "issue:43531|issue:44821", "issue:43742|issue:44589", "issue:43653|issue:43927", "issue:44514|issue:45381", "issue:45042|issue:45081"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7148, "estimated_input_tokens": 3446, "item_count": 18, "node_count": 18, "serialized_chars": 13782, "soft_pair_count": 10}, "cached_at": "2026-04-14T20:38:25Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "32bedb33b7a8bda9804219d939c2bdf602f86bc80dbbc695b36bafd188a4e04d", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:41762", "issue:43295", "issue:43452", "issue:43504", "issue:43550", "issue:43577", "issue:43606", "issue:43653", "issue:43688", "issue:43697", "issue:43720", "issue:44568", "issue:44843", "issue:44898", "issue:44936", "issue:45216", "issue:45356", "issue:45440"], "result": {"analyst_result": {"best_issue_reason": "issue:44843 is the most actionable and broadly representative of the cluster's loose loading/tokenizer theme, but only as a representative issue\u2014not as a duplicate target.", "best_pr_reason": null, "canonical_issue_reason": "issue:44843 is the best representative anchor because it is a concrete, reproducible tokenizer-loading regression with a clear failure mode and user impact. 
That said, it is not a duplicate of the others; the cluster is too diverse for a single canonical bug.", "canonical_pr_reason": null, "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "issue:43452", "reason": "Both involve from_pretrained/tokenizer loading, but the concrete bugs differ: gguf_file breaks tokenizer/model loading, while 44843 is an offline failure in _patch_mistral_regex/model_info. Different code paths and symptoms.", "right": "issue:44843"}, {"accept": false, "left": "issue:43504", "reason": "Different models and failure modes: Beit legacy-field loading vs BitNet packed-weight unpacking during accelerate loading. Same broad loading theme, not the same bug.", "right": "issue:43720"}, {"accept": false, "left": "issue:43697", "reason": "RTDetrV2 output drift in v5 and Qwen3.5 save_pretrained corruption are unrelated regressions affecting different subsystems.", "right": "issue:45216"}, {"accept": false, "left": "issue:43452", "reason": "gguf_file breaks AutoTokenizer/AutoModel loading, while BLIP2 has an incorrect dtype propagation issue. Not the same underlying defect.", "right": "issue:43577"}, {"accept": false, "left": "issue:43688", "reason": "Both are MoE-related, but one is about auxiliary loss normalization and the other about DeepSeekV3 implementation divergence. Different bug classes.", "right": "issue:45440"}, {"accept": false, "left": "issue:43504", "reason": "Beit legacy-field loading and Bamba torch.compile/SDPA failure are unrelated; same broad ML area only.", "right": "issue:43550"}, {"accept": false, "left": "issue:41762", "reason": "Gemma3 ZeRO-3 loading IndexError and trainer.evaluate() after trainer.train() are different lifecycle issues with different failure contexts.", "right": "issue:44936"}, {"accept": false, "left": "issue:43295", "reason": "Both are tokenizer-adjacent regressions, but one is about processor.tokenizer/image passing and the other is Kimi codec handling plus a misleading warning. 
Not the same bug.", "right": "issue:45356"}, {"accept": false, "left": "issue:43606", "reason": "CPU offload device mismatch in bark-small and Perceiver interpolate_pos_encoding failure are different model-specific bugs.", "right": "issue:44898"}, {"accept": false, "left": "issue:43653", "reason": "Both concern special tokens, but BigBird's mask token registration and mDeBERTa's add_special_tokens BOS/EOS omission are distinct tokenizer defects.", "right": "issue:44568"}], "summary": "This cluster is heterogeneous: the items span unrelated tokenizer, model-loading, training, and vision-model regressions. None of the soft pairs look like the same underlying bug, so there is no true duplicate group here."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43452|issue:44843", "issue:43504|issue:43720", "issue:43697|issue:45216", "issue:43452|issue:43577", "issue:43688|issue:45440", "issue:43504|issue:43550", "issue:41762|issue:44936", "issue:43295|issue:45356", "issue:43606|issue:44898", "issue:43653|issue:44568"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6602, "estimated_input_tokens": 3173, "item_count": 17, "node_count": 17, "serialized_chars": 12691, "soft_pair_count": 9}, "cached_at": "2026-04-14T20:38:54Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "0d6e6b78619f6043e7cab6921b271f353ae2882c09aafe67923ac9ba0aea9c3d", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:41720", "issue:43504", "issue:43525", "issue:43606", "issue:43643", "issue:43720", "issue:43906", "issue:43927", "issue:44464", "issue:44466", "issue:44610", "issue:44625", "issue:44743", "issue:44843", 
"issue:44877", "issue:44991", "issue:45005"], "result": {"analyst_result": {"best_issue_reason": "43643 is the best single issue to keep because it is open, generalizes the narrower Llama4 symptom, and points at the underlying config-loading defect.", "best_pr_reason": null, "canonical_issue_reason": "43643 is the broader, more general report about `trust_remote_code=True` dropping fields from `AutoConfig` results; 43525 looks like a concrete symptom of that same missing-field problem (`pad_token_id`).", "canonical_pr_reason": null, "confidence": 0.67, "soft_edge_verdicts": [{"accept": false, "left": "issue:43504", "reason": "Different bugs: legacy-field/preset loading for BEiT segmentation vs processor/model input-size mismatch for OmDet-Turbo.", "right": "issue:44610"}, {"accept": false, "left": "issue:44625", "reason": "Both involve config handling, but one is `num_labels` propagation and the other is strict config blocking `granite_speech`; not the same failure path.", "right": "issue:44877"}, {"accept": false, "left": "issue:41720", "reason": "The second is only an isolated reproduction of another issue, and there is no evidence it matches the Qwen3 auto-device-mapping CUDA assert.", "right": "issue:43906"}, {"accept": false, "left": "issue:44464", "reason": "Both concern generation/state behavior, but one is chunked generation with compiled forward and the other is recurrent state reset with cache; too different to merge safely.", "right": "issue:44743"}, {"accept": false, "left": "issue:44843", "reason": "Tokenizer-loading/offline regression vs a specific tokenizer failure on EMBEDDIA/est-roberta; same area, but not clearly the same concrete bug.", "right": "issue:44991"}, {"accept": false, "left": "issue:43720", "reason": "Packed-weight loading during accelerate vs config token IDs lost on save/load; unrelated code paths and symptoms.", "right": "issue:43927"}, {"accept": false, "left": "issue:43606", "reason": "CPU-offload device mismatch is unrelated to 
DiaConfig losing custom token IDs and causing generation errors.", "right": "issue:43927"}, {"accept": true, "left": "issue:43525", "reason": "Very likely the same underlying missing-field/config-loading bug: 43525's missing `pad_token_id` is a concrete example of fields being dropped from the returned config object.", "right": "issue:43643"}, {"accept": false, "left": "issue:44466", "reason": "Both mention tied weights in v5, but one is a device-dependent `lm_head.weight` serialization inconsistency and the other is a broader translation-model tied-weights report; not clearly the same fix.", "right": "issue:45005"}], "summary": "Mostly unrelated bug reports with one likely duplicate pair around missing config fields during AutoConfig loading. No PRs present."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43504|issue:44610", "issue:44625|issue:44877", "issue:41720|issue:43906", "issue:44464|issue:44743", "issue:44843|issue:44991", "issue:43720|issue:43927", "issue:43606|issue:43927", "issue:43525|issue:43643", "issue:44466|issue:45005"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6594, "estimated_input_tokens": 3169, "item_count": 17, "node_count": 17, "serialized_chars": 12675, "soft_pair_count": 9}, "cached_at": "2026-04-14T20:39:10Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "4636e2045900cfbf553d378922e1d4edf813d8acd2c0ccb74a09c2d4556ccd4b", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:24643", "issue:29127", "issue:38175", "issue:40444", "issue:43122", "issue:43295", "issue:43577", "issue:43697", "issue:43720", "issue:43819", "issue:43906", "issue:44361", 
"issue:44479", "issue:44811", "issue:44936", "issue:44977", "issue:45356"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "issue:43819", "reason": "Different features and code paths: DAC latent decoding vs Whisper batch_decode token skipping.", "right": "issue:44811"}, {"accept": false, "left": "issue:40444", "reason": "Qwen2.5-VL multi-image IterableDataset failure is unrelated to Whisper decoding behavior.", "right": "issue:44811"}, {"accept": false, "left": "issue:43577", "reason": "BLIP2 dtype loading bug and RTDetrV2 output regression are separate model-specific issues.", "right": "issue:43697"}, {"accept": false, "left": "issue:43720", "reason": "BitNet packed-weight loading and MLukeTokenizer task AttributeError are unrelated subsystems.", "right": "issue:44361"}, {"accept": false, "left": "issue:24643", "reason": "DeepSpeed weight shape error and LayoutLMv3 box-info messaging are different problems.", "right": "issue:29127"}, {"accept": false, "left": "issue:44977", "reason": "Qwen3.5 flash-attention generation regression is unrelated to Kimi-K2.5 tokenizer codec handling.", "right": "issue:45356"}, {"accept": false, "left": "issue:38175", "reason": "SigLIP2 zero-probability inference issue and processor.tokenizer regression are distinct failures.", "right": "issue:43295"}, {"accept": false, "left": "issue:43122", "reason": "Both mention tokenization, but one is a version-difference report and the other is only a reproduction of another issue; no clear same bug linkage.", "right": "issue:43906"}, {"accept": false, "left": "issue:44479", "reason": "Video-input regression for Qwen models and trainer.evaluate() failing after train() are unrelated.", "right": "issue:44936"}], "summary": "This cluster appears to be a false-positive grouping of unrelated issue reports across different models, 
tokenizers, training, and inference paths. None of the soft pairs look like the same underlying bug or change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43819|issue:44811", "issue:40444|issue:44811", "issue:43577|issue:43697", "issue:43720|issue:44361", "issue:24643|issue:29127", "issue:44977|issue:45356", "issue:38175|issue:43295", "issue:43122|issue:43906", "issue:44479|issue:44936"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7030, "estimated_input_tokens": 3387, "item_count": 18, "node_count": 18, "serialized_chars": 13545, "soft_pair_count": 10}, "cached_at": "2026-04-14T20:39:21Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "942ffa8f50bb85852b4c60d2939f81efdb3c1facac95028d16c352ce7efdf1c1", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:28282", "issue:30064", "issue:32090", "issue:33357", "issue:33666", "issue:35141", "issue:43329", "issue:43452", "issue:43504", "issue:43531", "issue:43577", "issue:43606", "issue:43742", "issue:44451", "issue:44464", "issue:44554", "issue:44977", "issue:45325"], "result": {"analyst_result": {"best_issue_reason": "No global best issue: none of the issues is a safe anchor for the rest, since the pairwise similarities are only superficial.", "best_pr_reason": null, "canonical_issue_reason": "No canonical issue: the reports describe distinct failures in different subsystems and code paths.", "canonical_pr_reason": null, "confidence": 0.98, "soft_edge_verdicts": [{"accept": false, "left": "issue:30064", "reason": "Different features and failure modes: segmentation-map processing vs multi-GPU Qwen2-VL training.", "right": 
"issue:33666"}, {"accept": false, "left": "issue:28282", "reason": "Both are runtime failures, but one is a missing PyTorch import and the other is a MacOS bus error with CLIP.", "right": "issue:33357"}, {"accept": false, "left": "issue:28282", "reason": "Unrelated bugs: missing backend dependency vs NoneType broadcast error in Trainer.", "right": "issue:32090"}, {"accept": false, "left": "issue:28282", "reason": "Different code paths: dependency import failure vs embedding resize/post_init reinitialization.", "right": "issue:35141"}, {"accept": false, "left": "issue:43504", "reason": "Different model-loading problems: legacy preset field vs CPU offload device mismatch.", "right": "issue:43606"}, {"accept": false, "left": "issue:43577", "reason": "Distinct correctness issues: BLIP2 dtype propagation vs MPS attention shape/value-head mismatch.", "right": "issue:44554"}, {"accept": false, "left": "issue:44464", "reason": "Both affect generation, but the concrete bugs and model stacks differ too much to be the same change.", "right": "issue:44977"}, {"accept": false, "left": "issue:43531", "reason": "Different failures: Qwen3-MoE sliding_window behavior vs a key error loading MobileLLM-125M.", "right": "issue:43742"}, {"accept": false, "left": "issue:43329", "reason": "Both involve VLM internals, but one is an undefined-variable video branch bug and the other is a rope-index scaling issue.", "right": "issue:45325"}, {"accept": false, "left": "issue:43452", "reason": "Both are model-loading failures, but the specific breakage differs: gguf_file support vs loading ScandiBERT.", "right": "issue:44451"}], "summary": "All proposed soft pairs look like false positives. 
The items span unrelated bugs across loading, training, multimodal processing, generation, and device/runtime issues, so this cluster should not be collapsed into a single duplicate set."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:30064|issue:33666", "issue:28282|issue:33357", "issue:28282|issue:32090", "issue:28282|issue:35141", "issue:43504|issue:43606", "issue:43577|issue:44554", "issue:44464|issue:44977", "issue:43531|issue:43742", "issue:43329|issue:45325", "issue:43452|issue:44451"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6838, "estimated_input_tokens": 3291, "item_count": 18, "node_count": 18, "serialized_chars": 13161, "soft_pair_count": 9}, "cached_at": "2026-04-14T20:39:38Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "5b27c35bc57696ea1f590b6d420ed1609aefbfebefec902fe98469f7434daf32", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:41720", "issue:43012", "issue:43334", "issue:43504", "issue:43540", "issue:43572", "issue:43577", "issue:43653", "issue:43716", "issue:43927", "issue:44361", "issue:44464", "issue:44479", "issue:44625", "issue:44849", "issue:45200", "issue:45229", "issue:45357"], "result": {"analyst_result": {"best_issue_reason": "No global best issue stands out; any choice would be arbitrary because the cluster is not a duplicate set.", "best_pr_reason": "No pull requests are present in this cluster.", "canonical_issue_reason": "No single issue is a safe canonical because the items span unrelated regressions across different models and subsystems.", "canonical_pr_reason": "No pull requests are present in this cluster.", "confidence": 
0.97, "soft_edge_verdicts": [{"accept": false, "left": "issue:43504", "reason": "Both are load/token-related, but one is a legacy-field preset loading bug in BEiT and the other is a BigBirdTokenizer special-token registration issue; different components and failure modes.", "right": "issue:43653"}, {"accept": false, "left": "issue:44849", "reason": "Both mention Qwen3.5, but one is about output_hidden_states behavior and the other is a save_pretrained regression with visual encoder keys; separate code paths.", "right": "issue:45357"}, {"accept": false, "left": "issue:43927", "reason": "DiaConfig losing custom token IDs on save/load is unrelated to MLukeTokenizer raising AttributeError on tasks; no shared underlying bug is evident.", "right": "issue:44361"}, {"accept": false, "left": "issue:45200", "reason": "Gemma 4 text-only token-type defaults and Gemma 4 multi-GPU CUDA OOM are different problems; one is input/config handling, the other is memory capacity.", "right": "issue:45229"}, {"accept": false, "left": "issue:43012", "reason": "Both concern dtype/precision, but one is a PyTorch warning during bfloat16 compilation and the other is BLIP2 dtype fields staying float32 after loading; not the same bug.", "right": "issue:43577"}, {"accept": false, "left": "issue:44479", "reason": "Qwen video-input regression and Qwen3.5 num_labels propagation are unrelated; one affects multimodal preprocessing, the other config propagation for labels.", "right": "issue:44625"}, {"accept": false, "left": "issue:43334", "reason": "Both involve missing token IDs in configs, but they affect different model families and different fields (pad_token_id vs pad_token_idx); not the same underlying issue.", "right": "issue:43572"}, {"accept": false, "left": "issue:43540", "reason": "A video-input ValueError in Qwen3OmniMoe and a dtype mismatch between image preprocessor and model in Mistral-3 are distinct multimodal bugs.", "right": "issue:43716"}, {"accept": false, "left": "issue:41720", 
"reason": "Qwen3 auto device mapping causing cudaErrorAssert and chunked generation inconsistency with compiled forward are different execution-path failures with no clear shared root cause.", "right": "issue:44464"}], "summary": "The cluster is heterogeneous: the paired items share only broad keywords like dtype, token IDs, or model family names, but they describe different bugs, models, and code paths. No soft edge looks like a true duplicate or mergeable PR pair."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43504|issue:43653", "issue:44849|issue:45357", "issue:43927|issue:44361", "issue:45200|issue:45229", "issue:43012|issue:43577", "issue:44479|issue:44625", "issue:43334|issue:43572", "issue:43540|issue:43716", "issue:41720|issue:44464"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6914, "estimated_input_tokens": 3329, "item_count": 18, "node_count": 18, "serialized_chars": 13314, "soft_pair_count": 10}, "cached_at": "2026-04-14T20:39:57Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "c9208e9795bd9e69e3958613039bacb951ca76d39c8f704a4ae6680469659b97", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:32090", "issue:33666", "issue:34567", "issue:43262", "issue:43278", "issue:43493", "issue:43525", "issue:43572", "issue:43688", "issue:43742", "issue:43756", "issue:43825", "issue:43873", "issue:44488", "issue:44821", "issue:44843", "issue:44913", "issue:45084"], "result": {"analyst_result": {"best_issue_reason": "If one issue must anchor the set, #43873 is the most active/open and has the most inbound references, but it is not a true duplicate hub for the others.", 
"best_pr_reason": null, "canonical_issue_reason": "No single canonical issue stands out because the items span unrelated trainer, model config, tokenizer, processor, and pipeline problems.", "canonical_pr_reason": null, "confidence": 0.94, "soft_edge_verdicts": [{"accept": false, "left": "issue:43742", "reason": "Different loading failures: one is a key error for facebook/MobileLLM-125M, the other is an AutoImageProcessor URL-loading problem.", "right": "issue:44821"}, {"accept": false, "left": "issue:43525", "reason": "Both involve missing pad-token-related config fields, but they affect different model configs and different attributes, so they are not the same concrete bug.", "right": "issue:43572"}, {"accept": false, "left": "issue:32090", "reason": "One is a Trainer GPU broadcast TypeError; the other is a Qwen2-VL multi-GPU training request. Same broad area, not the same failure.", "right": "issue:33666"}, {"accept": false, "left": "issue:32090", "reason": "Trainer broadcast NoneType error and TrainerState token-count update bug are distinct trainer issues with different code paths.", "right": "issue:34567"}, {"accept": false, "left": "issue:43756", "reason": "Both concern rotary settings, but one is about Smollm3 dropping RoPE layers while the other is a GPTNeoX config reload regression.", "right": "issue:44913"}, {"accept": false, "left": "issue:43278", "reason": "Embedding dtype drift on evaluate is unrelated to auxiliary-loss normalization in OLMoE/GPT Oss.", "right": "issue:43688"}, {"accept": false, "left": "issue:43493", "reason": "SigLIP2 implementation mismatch and pipeline translation-task messaging are unrelated issues.", "right": "issue:43825"}, {"accept": false, "left": "issue:43262", "reason": "Audio processor chat-template sampling-rate default and template-compilation TypeError are different code paths and symptoms.", "right": "issue:45084"}, {"accept": false, "left": "issue:44488", "reason": "Model loading failure for cjvt/sleng-bert and 
offline-mode mistral regex patch bug are separate loader problems.", "right": "issue:44843"}, {"accept": false, "left": "issue:43688", "reason": "Auxiliary-loss normalization and quantization/offloading behavior are unrelated bugs.", "right": "issue:43873"}], "summary": "The cluster is heterogeneous: none of the soft pairs look like the same underlying bug or change, so I would not merge any of them as duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43742|issue:44821", "issue:43525|issue:43572", "issue:32090|issue:33666", "issue:32090|issue:34567", "issue:43756|issue:44913", "issue:43278|issue:43688", "issue:43493|issue:43825", "issue:43262|issue:45084", "issue:44488|issue:44843", "issue:43688|issue:43873"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7024, "estimated_input_tokens": 3384, "item_count": 18, "node_count": 18, "serialized_chars": 13533, "soft_pair_count": 10}, "cached_at": "2026-04-14T20:40:04Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "957b55e6071b196bc92bdca9cf237aa161816c14d284befa1c28e2448f769844", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:33666", "issue:36010", "issue:39692", "issue:43257", "issue:43278", "issue:43299", "issue:43493", "issue:43540", "issue:43756", "issue:43844", "issue:44060", "issue:44423", "issue:44734", "issue:44964", "issue:44977", "issue:45072", "issue:45198", "issue:45405"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.98, "soft_edge_verdicts": [{"accept": false, "left": "issue:33666", 
"reason": "Qwen2-VL multi-GPU training and a GenerationMixin import error are different failure modes in different paths.", "right": "issue:36010"}, {"accept": false, "left": "issue:43278", "reason": "One is an embedding dtype regression during eval; the other is a gradient explosion with HfDeepSpeedConfig + ZeRO-3.", "right": "issue:43844"}, {"accept": false, "left": "issue:43756", "reason": "Smollm3 RoPE-layer mismatch and Qwen3-Next tied-weights warning are unrelated architecture/configuration bugs.", "right": "issue:44060"}, {"accept": false, "left": "issue:43257", "reason": "Both mention Qwen3/Qwen3VL MoE loading, but one is an accelerate+deepspeed conversion issue and the other is a transformers version regression; not the same concrete bug.", "right": "issue:43299"}, {"accept": false, "left": "issue:45072", "reason": "CI dtype mismatches for SwitchTransformers/TimmWrapperModel and Wav2Vec2 save/tokenization failures are distinct issues.", "right": "issue:45198"}, {"accept": false, "left": "issue:43540", "reason": "Qwen3OmniMoe video-input ValueError and Qwen3.5 flash-attention generation failure affect different code paths and symptoms.", "right": "issue:44977"}, {"accept": false, "left": "issue:44423", "reason": "Both are serve-related crashes, but one is multimodal continuous batching with a string `.to()` error and the other is KV-cache continuation indexing; different bugs.", "right": "issue:44734"}, {"accept": false, "left": "issue:43493", "reason": "SigLIP2 JAX/HF discrepancy is a model-implementation mismatch, not the ZeRO-3 gradient amplification bug.", "right": "issue:43844"}, {"accept": false, "left": "issue:44964", "reason": "Loading Phi-4-multimodal-instruct with latest transformers and a bumped MIN_PEFT_VERSION release issue are unrelated.", "right": "issue:45405"}, {"accept": false, "left": "issue:39692", "reason": "Both concern SigLIP2, but one is a docs example error/quantization failure and the other is a core HF vs JAX implementation 
discrepancy; not mergeable as one fix.", "right": "issue:43493"}], "summary": "These items are not duplicates of one another; they span unrelated bugs across different models and subsystems (multimodal training, loading/import failures, documentation examples, MoE loading, dtype mismatches, serving crashes, and release/versioning). No PRs are present in the cluster."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:33666|issue:36010", "issue:43278|issue:43844", "issue:43756|issue:44060", "issue:43257|issue:43299", "issue:45072|issue:45198", "issue:43540|issue:44977", "issue:44423|issue:44734", "issue:43493|issue:43844", "issue:44964|issue:45405", "issue:39692|issue:43493"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6624, "estimated_input_tokens": 3184, "item_count": 17, "node_count": 17, "serialized_chars": 12735, "soft_pair_count": 9}, "cached_at": "2026-04-14T20:40:17Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "b8c1c2d4cd4246c29b6e8931db0375d159caa625c569404fb8a77769d29e3fdc", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:33357", "issue:33666", "issue:36331", "issue:43329", "issue:43572", "issue:43606", "issue:43646", "issue:43688", "issue:43746", "issue:43824", "issue:43994", "issue:44291", "issue:44466", "issue:44493", "issue:44521", "issue:44589", "issue:45290"], "result": {"analyst_result": {"best_issue_reason": "issue:45290 is the clearest self-contained report with a narrow scope and specific crash condition, so it is the best representative issue if one must be chosen.", "best_pr_reason": null, "canonical_issue_reason": "No single canonical issue: the 
set spans distinct bugs in loading, training, tokenization, config, and serialization across different subsystems/models.", "canonical_pr_reason": null, "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "issue:33666", "reason": "Different problems: multi-GPU training support for Qwen2-VL vs a Trainer API signature break in compute_loss.", "right": "issue:36331"}, {"accept": false, "left": "issue:43824", "reason": "Unrelated failures: an import error for Qwen2.5-VL vs a Float8 storage lookup/type error.", "right": "issue:44589"}, {"accept": false, "left": "issue:44521", "reason": "Both involve apply_chat_template, but the bugs differ: all-zero assistant masks for multimodal inputs vs a crash on tool-call assistant messages with no content.", "right": "issue:45290"}, {"accept": false, "left": "issue:43572", "reason": "Both touch v5 migration, but one is a missing config field and the other is an init_empty_weights argument mismatch; different code paths and fixes.", "right": "issue:44291"}, {"accept": false, "left": "issue:43688", "reason": "Different models and behaviors: auxiliary-loss normalization vs PEFT adapter checkpoint loading.", "right": "issue:43746"}, {"accept": false, "left": "issue:43646", "reason": "Not the same bug: custom model initialization breakage vs inconsistent lm_head serialization depending on device.", "right": "issue:44466"}, {"accept": false, "left": "issue:43994", "reason": "Different symptoms and likely causes: nonsensical AutoModel/pipeline output for SigLIP2 vs unexpected position-id key warnings.", "right": "issue:44493"}, {"accept": false, "left": "issue:43329", "reason": "Separate areas entirely: multimodal video token counting bug vs CPU offload device mismatch in bark-small.", "right": "issue:43606"}, {"accept": false, "left": "issue:33357", "reason": "No overlap beyond being model-related: MacOS bus error in CLIP vs Qwen2-VL multi-GPU training.", "right": "issue:33666"}], "summary": "These items are mostly 
unrelated issues across different models and code paths; none of the proposed soft edges looks like a true duplicate pair."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:33666|issue:36331", "issue:43824|issue:44589", "issue:44521|issue:45290", "issue:43572|issue:44291", "issue:43688|issue:43746", "issue:43646|issue:44466", "issue:43994|issue:44493", "issue:43329|issue:43606", "issue:33357|issue:33666"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6672, "estimated_input_tokens": 3208, "item_count": 17, "node_count": 17, "serialized_chars": 12829, "soft_pair_count": 10}, "cached_at": "2026-04-14T20:41:05Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "73db7b1b469230daeae337272332cf26467d26098e0738054986610cdace0844", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:15354", "issue:38175", "issue:39401", "issue:39692", "issue:43232", "issue:43257", "issue:43278", "issue:43295", "issue:43540", "issue:43575", "issue:43701", "issue:44315", "issue:44568", "issue:44792", "issue:44849", "issue:45200", "issue:45405"], "result": {"analyst_result": {"best_issue_reason": "No strong best issue exists. 
If forced to pick the broadest regression-style report, issue:43295 is the most generic, but it is not representative of the rest of the cluster.", "best_pr_reason": null, "canonical_issue_reason": "No single canonical issue fits this cluster; the items are unrelated enough that deduplication would be incorrect.", "canonical_pr_reason": null, "confidence": 0.98, "soft_edge_verdicts": [{"accept": false, "left": "issue:39692", "reason": "SigLIP2 docs-example failures vs a regression in custom model code using processor.tokenizer; different symptoms and likely different fixes.", "right": "issue:43295"}, {"accept": false, "left": "issue:39401", "reason": "Qwen3 tokenizer offset_mapping bug is unrelated to mdeberta-v3 add_special_tokens/BOS/EOS behavior.", "right": "issue:44568"}, {"accept": false, "left": "issue:43575", "reason": "TP OOM when loading Qwen2-57B-A14B-Instruct is unrelated to the PEFT version bump / unreleased dependency issue.", "right": "issue:45405"}, {"accept": false, "left": "issue:43232", "reason": "Generation state sync bug (_update_model_kwargs_for_generation after sync_gpus) is unrelated to Qwen3.5 output_hidden_states handling.", "right": "issue:44849"}, {"accept": false, "left": "issue:44792", "reason": "Janus test failure on image generation is a model-specific test issue, not the PEFT version mismatch problem.", "right": "issue:45405"}, {"accept": false, "left": "issue:43257", "reason": "Qwen3 MOE weight conversion with accelerate+deepspeed is unrelated to the MIN_PEFT_VERSION release/versioning issue.", "right": "issue:45405"}, {"accept": false, "left": "issue:15354", "reason": "torch.jit.script generator-expression export failure is unrelated to unexpected zero probabilities in SigLIP2.", "right": "issue:38175"}, {"accept": false, "left": "issue:43540", "reason": "Video-input validation in Qwen3OmniMoe is unrelated to Gemma 4 mm_token_type_ids defaulting for text-only fine-tuning.", "right": "issue:45200"}, {"accept": false, "left": 
"issue:43278", "reason": "Embedding dtype drift BF16->FP32 during evaluate is unrelated to resume_from_checkpoint key mismatch.", "right": "issue:43701"}, {"accept": false, "left": "issue:44315", "reason": "Liger Kernel not applied with model_init is a training setup bug, not the same as Qwen3.5 output_hidden_states=True behavior.", "right": "issue:44849"}], "summary": "This cluster is highly heterogeneous: the issues cover unrelated models, tokenizer behavior, generation internals, training/resume mechanics, and a release/versioning problem. There is no clear duplicate set or shared underlying bug across the soft pairs."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:39692|issue:43295", "issue:39401|issue:44568", "issue:43575|issue:45405", "issue:43232|issue:44849", "issue:44792|issue:45405", "issue:43257|issue:45405", "issue:15354|issue:38175", "issue:43540|issue:45200", "issue:43278|issue:43701", "issue:44315|issue:44849"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6642, "estimated_input_tokens": 3193, "item_count": 17, "node_count": 17, "serialized_chars": 12769, "soft_pair_count": 9}, "cached_at": "2026-04-14T20:41:27Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "d5a6dec165a6c27d88d0638ec857e2120b37190ad6f702d3b6ac079ed622809f", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:37428", "issue:43334", "issue:43525", "issue:43582", "issue:43606", "issue:43824", "issue:43844", "issue:43957", "issue:44112", "issue:44442", "issue:44554", "issue:44964", "issue:44977", "issue:45198", "issue:45200", "issue:45325", "issue:45356"], "result": {"analyst_result": 
{"best_issue_reason": "issue:43334 is the best single issue to track the duplicate pad_token_id loading failure because it states the failure mode most directly.", "best_pr_reason": null, "canonical_issue_reason": "issue:43334 is the clearest representative of the shared pad_token_id AttributeError regression; issue:43525 looks like a sibling manifestation on another model config.", "canonical_pr_reason": null, "confidence": 0.69, "soft_edge_verdicts": [{"accept": false, "left": "issue:43582", "reason": "Different bugs: Apple Silicon allocator warmup TypeError vs Qwen3.5 flash-attention generation behavior.", "right": "issue:44977"}, {"accept": false, "left": "issue:43844", "reason": "Different failure modes: ZeRO-3 gradient anomaly vs latest-transformers model loading error.", "right": "issue:44964"}, {"accept": false, "left": "issue:37428", "reason": "Both are loading-related, but one is an import error in flash-attention utils and the other is a meta-device compatibility bug; not the same bug.", "right": "issue:43957"}, {"accept": false, "left": "issue:43606", "reason": "Different scope and root cause: CPU offload device mismatch vs a stale GraniteSpeech CI test.", "right": "issue:44112"}, {"accept": false, "left": "issue:45200", "reason": "Both involve multimodal models, but the bugs are different: missing mm_token_type_ids default vs incorrect rope/position scaling.", "right": "issue:45325"}, {"accept": true, "left": "issue:43334", "reason": "Both report the same pad_token_id AttributeError during model loading, likely the same config regression across different model classes.", "right": "issue:43525"}, {"accept": false, "left": "issue:44554", "reason": "Unrelated issues: MPS attention correctness vs tokenizer codec handling/warning regression.", "right": "issue:45356"}, {"accept": false, "left": "issue:44442", "reason": "Both touch tokenizers, but one is AutoTokenizer loading and the other is save_pretrained/tokenization for a specific model; not the same 
bug.", "right": "issue:45198"}, {"accept": false, "left": "issue:37428", "reason": "Different import errors involving different symbols and code paths; no clear shared underlying bug.", "right": "issue:43824"}], "summary": "Only one soft duplicate pair stands out: the two pad_token_id loading failures look like the same underlying config regression. The rest are unrelated bugs across different models/subsystems and should not be merged."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43582|issue:44977", "issue:43844|issue:44964", "issue:37428|issue:43957", "issue:43606|issue:44112", "issue:45200|issue:45325", "issue:43334|issue:43525", "issue:44554|issue:45356", "issue:44442|issue:45198", "issue:37428|issue:43824"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6596, "estimated_input_tokens": 3170, "item_count": 17, "node_count": 17, "serialized_chars": 12678, "soft_pair_count": 9}, "cached_at": "2026-04-14T20:41:40Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "f5f9e7ff4e7fdd65972caef0a23858dee352345599c3643a4bc66ab93b4f4ea8", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:40990", "issue:43295", "issue:43388", "issue:43493", "issue:43526", "issue:43572", "issue:43701", "issue:43873", "issue:44464", "issue:44466", "issue:44589", "issue:44610", "issue:44625", "issue:44898", "issue:44987", "issue:45127", "issue:45412"], "result": {"analyst_result": {"best_issue_reason": "None of the issues stands out as a cluster anchor for duplicate triage; they do not share a concrete code path or symptom family.", "best_pr_reason": null, "canonical_issue_reason": "No single issue is 
a good canonical representative because the cluster is heterogeneous rather than one underlying bug.", "canonical_pr_reason": null, "confidence": 0.93, "soft_edge_verdicts": [{"accept": false, "left": "issue:44898", "reason": "Different problems: one is an image-resolution mismatch in Perceiver classification, the other is RT-DETR memory not being released after deletion.", "right": "issue:45412"}, {"accept": false, "left": "issue:44466", "reason": "Unrelated bugs: inconsistent lm_head serialization vs LoRA merge collapse after extending vocabulary; same broad area of model weights, but not the same failure mode or fix.", "right": "issue:45127"}, {"accept": false, "left": "issue:43388", "reason": "Both involve labels, but one is gather_for_metrics dropping tuple elements in the last batch while the other is BeitImageProcessorFast reduce_labels returning only one label; distinct code paths.", "right": "issue:43526"}, {"accept": false, "left": "issue:43295", "reason": "Processor/tokenizer regression with images is unrelated to OmDet-Turbo producing the wrong input size; different components and symptoms.", "right": "issue:44610"}, {"accept": false, "left": "issue:43493", "reason": "SigLIP2 implementation discrepancy is not the same as a resume_from_checkpoint key mismatch; no shared underlying bug.", "right": "issue:43701"}, {"accept": false, "left": "issue:40990", "reason": "Extreme perplexity on a specific model/dataset is unrelated to Qwen3.5 num_labels propagation between configs.", "right": "issue:44625"}, {"accept": false, "left": "issue:43572", "reason": "Both mention loading/configuration, but StableLmConfig missing pad_token_idx and transformers>=5.1.0 failing on physical-intelligence/fast are different issues.", "right": "issue:44987"}, {"accept": false, "left": "issue:43572", "reason": "StableLmConfig pad_token_idx regression and Float8 storage lookup failure are separate bugs in different subsystems.", "right": "issue:44589"}, {"accept": false, "left": 
"issue:43873", "reason": "Quantization/offloading behavior and chunked generation with compiled forward are not the same code-path problem.", "right": "issue:44464"}], "summary": "These issues are not duplicates of one another; they span unrelated bugs across model loading, processors, training utilities, serialization, and memory behavior. No PRs are present."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44898|issue:45412", "issue:44466|issue:45127", "issue:43388|issue:43526", "issue:43295|issue:44610", "issue:43493|issue:43701", "issue:40990|issue:44625", "issue:43572|issue:44987", "issue:43572|issue:44589", "issue:43873|issue:44464"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6498, "estimated_input_tokens": 3121, "item_count": 17, "node_count": 17, "serialized_chars": 12483, "soft_pair_count": 9}, "cached_at": "2026-04-14T20:42:28Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "ccc5b0eea96adcf876a6eda5b369e9820213966041276027401b5b16709ac7d3", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:38175", "issue:42915", "issue:43819", "issue:43824", "issue:43844", "issue:43975", "issue:44360", "issue:44514", "issue:44521", "issue:44561", "issue:44625", "issue:44849", "issue:44871", "issue:44936", "issue:45003", "issue:45083", "issue:45412"], "result": {"analyst_result": {"best_issue_reason": "Issue 44514 has a specific failure mode, a narrow API surface (`Qwen2_5_VLProcessor.apply_chat_template`), and reproducible runtime behavior, so it is the strongest representative issue even though it is not a duplicate hub for the rest of the set.", "best_pr_reason": null, 
"canonical_issue_reason": "Issue 44514 is the clearest, most concrete bug report in the set and is the closest thematic anchor to another item in the cluster, but the cluster overall is too mixed to have a strong true canonical issue.", "canonical_pr_reason": null, "confidence": 0.94, "soft_edge_verdicts": [{"accept": false, "left": "issue:43844", "reason": "Different failure modes: ZeRO-3 gradient explosion during training vs. `trainer.evaluate()` breaking after `trainer.train()`. Same library area, but not the same bug.", "right": "issue:44936"}, {"accept": false, "left": "issue:44514", "reason": "Both involve `apply_chat_template`, but one is a batched-input crash with `padding=False` and the other is all-zero assistant masks for multimodal inputs. Related area, distinct underlying defects.", "right": "issue:44521"}, {"accept": false, "left": "issue:44625", "reason": "Config propagation for Qwen3.5 `num_labels` is unrelated to Gemma-3 EOS token configuration mismatch.", "right": "issue:44871"}, {"accept": false, "left": "issue:43824", "reason": "An import error for a Qwen2.5-VL class is unrelated to DeepSeek Coder detokenization in v5.", "right": "issue:43975"}, {"accept": false, "left": "issue:44360", "reason": "A missing ReLU in the DSA indexer and the removal of `is_torch_fx_available` breaking `trust_remote_code` models are unrelated regressions.", "right": "issue:44561"}, {"accept": false, "left": "issue:38175", "reason": "Unexpected zero probabilities in SigLIP2 is unrelated to RT-DETR memory not being released after deletion.", "right": "issue:45412"}, {"accept": false, "left": "issue:44849", "reason": "Qwen3.5 hidden-state handling and unsafe `sys.modules[]` access are different code paths and different bug classes.", "right": "issue:45003"}, {"accept": false, "left": "issue:43819", "reason": "DAC latent/STE mismatch is unrelated to `_get_feat_extract_output_lengths` behavior in qwen3_omni_moe.", "right": "issue:45083"}, {"accept": false, "left": 
"issue:42915", "reason": "FineGrainedFP8Config failure in Qwen3Moe is unrelated to the Qwen2.5-VL import error.", "right": "issue:43824"}], "summary": "The items are a heterogeneous mix of unrelated model-specific bugs and regressions. None of the soft pairs look like the same underlying issue, so I would not merge any of them as duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43844|issue:44936", "issue:44514|issue:44521", "issue:44625|issue:44871", "issue:43824|issue:43975", "issue:44360|issue:44561", "issue:38175|issue:45412", "issue:44849|issue:45003", "issue:43819|issue:45083", "issue:42915|issue:43824"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6964, "estimated_input_tokens": 3354, "item_count": 18, "node_count": 18, "serialized_chars": 13416, "soft_pair_count": 10}, "cached_at": "2026-04-14T20:42:55Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "f3a27664077d4da5645cf423964194124b65306b6f7b1ec4c7424d4e0d065c08", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:38175", "issue:43278", "issue:43295", "issue:43450", "issue:43493", "issue:43575", "issue:43606", "issue:43827", "issue:43856", "issue:43873", "issue:43901", "issue:43994", "issue:44464", "issue:44568", "issue:44610", "issue:44857", "issue:44991", "issue:45083"], "result": {"analyst_result": {"best_issue_reason": "No global best issue for the cluster; there is no cohesive theme strong enough to treat one issue as the representative duplicate target.", "best_pr_reason": null, "canonical_issue_reason": "No single canonical issue stands out: the items cover unrelated regressions, model-specific 
bugs, and docs issues, and none of the soft pairs match the same concrete underlying defect.", "canonical_pr_reason": null, "confidence": 0.95, "soft_edge_verdicts": [{"accept": false, "left": "issue:43575", "reason": "Different problems: Qwen2 TP loading OOM vs chunked generation inconsistency under compiled forward.", "right": "issue:44464"}, {"accept": false, "left": "issue:43295", "reason": "Custom processor/tokenizer regression is unrelated to quantization offload behavior.", "right": "issue:43873"}, {"accept": false, "left": "issue:43278", "reason": "Embedding dtype drift in evaluate is not the same bug as Qwen3 MoE training memory inefficiency.", "right": "issue:43856"}, {"accept": false, "left": "issue:43295", "reason": "Processor.tokenizer regression and SigLIP2 nonsensical outputs are different concrete failures.", "right": "issue:43994"}, {"accept": false, "left": "issue:43295", "reason": "Custom tokenizer/image regression is unrelated to batched video processor shape bugs.", "right": "issue:43450"}, {"accept": false, "left": "issue:43827", "reason": "Both are docs-related, but they concern different API references and different behavioral mismatches.", "right": "issue:43901"}, {"accept": false, "left": "issue:44568", "reason": "Tokenizer special-token handling for one model is distinct from tokenizer loading failure for another model.", "right": "issue:44991"}, {"accept": false, "left": "issue:43606", "reason": "CPU offload device mismatch for bark-small is unrelated to a qwen3_omni_moe helper-function behavior issue.", "right": "issue:45083"}, {"accept": false, "left": "issue:44610", "reason": "Processor input-size mismatch and float16 AMP crash are different code paths and symptoms.", "right": "issue:44857"}, {"accept": false, "left": "issue:38175", "reason": "Both involve SigLIP2, but one reports zero probabilities and the other a HF-vs-JAX implementation discrepancy; not the same concrete bug.", "right": "issue:43493"}], "summary": "These soft pairs 
are mostly superficial similarities across different bugs. They span unrelated models, APIs, and failure modes, so none of the candidate pairs should be merged as duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43575|issue:44464", "issue:43295|issue:43873", "issue:43278|issue:43856", "issue:43295|issue:43994", "issue:43295|issue:43450", "issue:43827|issue:43901", "issue:44568|issue:44991", "issue:43606|issue:45083", "issue:44610|issue:44857", "issue:38175|issue:43493"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6976, "estimated_input_tokens": 3360, "item_count": 18, "node_count": 18, "serialized_chars": 13437, "soft_pair_count": 10}, "cached_at": "2026-04-14T20:43:23Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "53095fa9cc5e7179ce459b46bc0f84ec8f64c6dead5be1445c77e15ae7d60c27", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:37428", "issue:43425", "issue:43450", "issue:43493", "issue:43653", "issue:44112", "issue:44186", "issue:44351", "issue:44462", "issue:44488", "issue:44561", "issue:44568", "issue:44779", "issue:45127", "issue:45198", "issue:45245", "issue:45290", "issue:45381"], "result": {"analyst_result": {"best_issue_reason": "issue:44462 has the clearest and most generalizable failure mode among the issues, making it the best anchor for this set.", "best_pr_reason": null, "canonical_issue_reason": "issue:44462 is the most representative standalone bug report: it describes a concrete, user-visible regression in a core loading path (AutoTokenizer ignoring tokenizer.json from the repository) and is broader than the model-specific tokenizer 
complaints.", "canonical_pr_reason": null, "confidence": 0.81, "soft_edge_verdicts": [{"accept": false, "left": "issue:44462", "reason": "Different failures: tokenizer.json loading regression vs removal of is_torch_fx_available breaking trust_remote_code models. Same ecosystem, not the same bug.", "right": "issue:44561"}, {"accept": false, "left": "issue:45127", "reason": "Unrelated problems: LoRA merge/collapsed outputs with extended vocab vs a category-count limit error. No shared code-path or fix.", "right": "issue:45245"}, {"accept": false, "left": "issue:44488", "reason": "Both are tokenizer-related, but one is a repo-loading issue for a specific model and the other is add_special_tokens not adding BOS/EOS for mdeberta-v3-base. Different concrete behavior.", "right": "issue:44568"}, {"accept": false, "left": "issue:44561", "reason": "Both mention v5 tokenizer breakage, but one is a trust_remote_code import/API regression and the other is Deepseek tokenization output changes. Not the same bug.", "right": "issue:44779"}, {"accept": false, "left": "issue:43450", "reason": "Video processor batched-shape bug vs SigLIP2 HF/JAX discrepancy. Different models and different code paths.", "right": "issue:43493"}, {"accept": false, "left": "issue:37428", "reason": "Both are compatibility/import issues, but one is a missing flash-attention helper symbol and the other is Torch 2.10 incompatibility. Too broad to merge.", "right": "issue:43425"}, {"accept": false, "left": "issue:45290", "reason": "Chat template crash on tool-call assistant messages vs Qwen2.5-VL video vision_position_ids issue. Different subsystems and failure modes.", "right": "issue:45381"}, {"accept": false, "left": "issue:43653", "reason": "BigBirdTokenizer special-token registration bug vs a GraniteSpeech CI stale test failure. 
One is a product bug, the other appears test-specific.", "right": "issue:44112"}, {"accept": false, "left": "issue:37428", "reason": "Both are import errors, but for different missing symbols in different modules. No evidence of the same underlying regression.", "right": "issue:44351"}, {"accept": false, "left": "issue:44186", "reason": "LayoutLMv2 tokenizer NER/padding crash vs Wav2Vec2 save_pretrained/tokenization failure. Different model families and different breakage points.", "right": "issue:45198"}], "summary": "No soft-edge pair looks like the same underlying bug/change. The items cluster around tokenizer/loading/regression complaints, but each pair points to a different model, API symbol, or failure mode, so they should stay separate."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44462|issue:44561", "issue:45127|issue:45245", "issue:44488|issue:44568", "issue:44561|issue:44779", "issue:43450|issue:43493", "issue:37428|issue:43425", "issue:45290|issue:45381", "issue:43653|issue:44112", "issue:37428|issue:44351", "issue:44186|issue:45198"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6622, "estimated_input_tokens": 3183, "item_count": 17, "node_count": 17, "serialized_chars": 12730, "soft_pair_count": 9}, "cached_at": "2026-04-14T20:43:45Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "82882b49b456771120976dbf3534733c6f955a718091c0449429114f391b1b64", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:29942", "issue:32090", "issue:39692", "issue:43262", "issue:43278", "issue:43450", "issue:43526", "issue:43575", "issue:43819", "issue:43825", "issue:44112", 
"issue:44265", "issue:44448", "issue:44743", "issue:44855", "issue:45083", "issue:45198"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "issue:44448", "reason": "Different models and failure modes: Pegasus output drift vs Qwen3 recurrent-state reset with cache. No shared code-path bug.", "right": "issue:44743"}, {"accept": false, "left": "issue:43450", "reason": "Video batch-shape bug vs Qwen2 TP OOM are unrelated symptoms in different subsystems.", "right": "issue:43575"}, {"accept": false, "left": "issue:39692", "reason": "SigLIP2 docs/example errors do not match BeitImageProcessorFast label reduction behavior.", "right": "issue:43526"}, {"accept": false, "left": "issue:29942", "reason": "Flash Attention test failures and Trainer _gpu_broadcast_one NoneType error are distinct training/runtime issues.", "right": "issue:32090"}, {"accept": false, "left": "issue:43278", "reason": "Embedding dtype mismatch during eval is unrelated to pipeline translation-task error messaging.", "right": "issue:43825"}, {"accept": false, "left": "issue:44265", "reason": "torch.export failure with torch_compilable_check is a different problem from Wav2Vec2 save_pretrained/tokenization failures.", "right": "issue:45198"}, {"accept": false, "left": "issue:43819", "reason": "DAC latent/forward mismatch is not the same as a stale CI test in GraniteSpeech.", "right": "issue:44112"}, {"accept": false, "left": "issue:44855", "reason": "Python 3.13 import parsing error in DebertaV2Model is unrelated to qwen3_omni_moe feature-length logic.", "right": "issue:45083"}, {"accept": false, "left": "issue:43262", "reason": "Audio chat-template sampling-rate default bug and video batched-shape bug are different modality-processing issues.", "right": "issue:43450"}], "summary": "This cluster is not a duplicate set: the items span unrelated 
bugs across training, inference, multimodal processors, export, and CI. All soft edges look like superficial similarity rather than the same underlying defect."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44448|issue:44743", "issue:43450|issue:43575", "issue:39692|issue:43526", "issue:29942|issue:32090", "issue:43278|issue:43825", "issue:44265|issue:45198", "issue:43819|issue:44112", "issue:44855|issue:45083", "issue:43262|issue:43450"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7120, "estimated_input_tokens": 3432, "item_count": 18, "node_count": 18, "serialized_chars": 13728, "soft_pair_count": 11}, "cached_at": "2026-04-14T20:44:36Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "8dadbb5335fdc46c00dc09ce53e56099c9be9bb7bebb7f1ab860377602196df1", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:38175", "issue:39692", "issue:43335", "issue:43404", "issue:43450", "issue:43526", "issue:43550", "issue:43611", "issue:43819", "issue:43931", "issue:44448", "issue:44493", "issue:44779", "issue:44977", "issue:45083", "issue:45200", "issue:45229", "issue:45381"], "result": {"analyst_result": {"best_issue_reason": "No issue is a strong global canonical representative; issue:44493 is the broadest wording, but it is still too generic and not actually aligned with the other reports.", "best_pr_reason": null, "canonical_issue_reason": "No single canonical issue is appropriate: the set spans distinct bugs across different models, processors, and failure modes, so any representative would be arbitrary.", "canonical_pr_reason": null, "confidence": 0.94, "soft_edge_verdicts": 
[{"accept": false, "left": "issue:45200", "reason": "Different models and different tensor fields: Gemma4 mm_token_type_ids handling vs Qwen2.5-VL video vision_position_ids. Not the same bug.", "right": "issue:45381"}, {"accept": false, "left": "issue:43611", "reason": "Both concern model loading, but one is a base_model_prefix regression and the other is a specific Qwen3-VL weight-shape mismatch. Different failure modes.", "right": "issue:43931"}, {"accept": false, "left": "issue:43404", "reason": "Unrelated model-specific issues: untied lm_head weights in Mistral3 vs missing default mm_token_type_ids in Gemma4.", "right": "issue:45200"}, {"accept": false, "left": "issue:39692", "reason": "SigLIP2 doc-example mistakes are not the same as the general position-id warning across many models.", "right": "issue:44493"}, {"accept": false, "left": "issue:43819", "reason": "DAC forward/from_latents mismatch is a codec/quantization logic bug, not a position-id loading warning.", "right": "issue:44493"}, {"accept": false, "left": "issue:44448", "reason": "Both mention v4/v5 behavior changes, but one is Pegasus generation output and the other is Deepseek tokenizer correctness. Different code paths.", "right": "issue:44779"}, {"accept": false, "left": "issue:38175", "reason": "SigLIP2 zero probabilities is a model output issue, not the broad unexpected position-id key issue.", "right": "issue:44493"}, {"accept": false, "left": "issue:44977", "reason": "Flash-attention generation failure in Qwen3.5 is unrelated to Gemma4 multi-GPU CUDA OOM.", "right": "issue:45229"}, {"accept": false, "left": "issue:43450", "reason": "Both are processor bugs, but one is batched video shape handling and the other is Beit reduce_labels returning only one label. 
Not the same defect.", "right": "issue:43526"}, {"accept": false, "left": "issue:43550", "reason": "Torch.compile/SDPA failure in Bamba-9B-v2 is unrelated to qwen3_omni_moe feature-length helper behavior.", "right": "issue:45083"}, {"accept": false, "left": "issue:43335", "reason": "SwitchTransformers sparse-layer config bug and qwen3_omni_moe length helper bug are unrelated subsystems and symptoms.", "right": "issue:45083"}], "summary": "These items are mostly unrelated bug reports that only share superficial wording around regressions, model loading, or processor behavior. None of the soft pairs look like the same underlying issue, so there is no safe duplicate merge candidate."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:45200|issue:45381", "issue:43611|issue:43931", "issue:43404|issue:45200", "issue:39692|issue:44493", "issue:43819|issue:44493", "issue:44448|issue:44779", "issue:38175|issue:44493", "issue:44977|issue:45229", "issue:43450|issue:43526", "issue:43550|issue:45083", "issue:43335|issue:45083"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7044, "estimated_input_tokens": 3394, "item_count": 18, "node_count": 18, "serialized_chars": 13575, "soft_pair_count": 10}, "cached_at": "2026-04-14T20:45:09Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "9fa8c2f434601f26b2675537f40f24b4fafd402bf9b8d09b92f13797b703d0e9", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:29127", "issue:32090", "issue:40444", "issue:43066", "issue:43335", "issue:43526", "issue:43550", "issue:43606", "issue:43653", "issue:43723", "issue:44448", "issue:44462", "issue:44554", 
"issue:44857", "issue:44977", "issue:45072", "issue:45372", "issue:45406"], "result": {"analyst_result": {"best_issue_reason": "No single issue is a safe representative for this cluster because none of the soft pairs share the same concrete code-path failure.", "best_pr_reason": null, "canonical_issue_reason": "No canonical issue: the cluster is a loose set of unrelated bugs rather than one underlying defect.", "canonical_pr_reason": null, "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "issue:29127", "reason": "Different bugs: LayoutLMv3 error messaging vs Trainer/_gpu_broadcast_one NoneType failure.", "right": "issue:32090"}, {"accept": false, "left": "issue:43335", "reason": "Different model/code paths: SwitchTransformers config bug vs Bamba torch.compile+SDPA failure.", "right": "issue:43550"}, {"accept": false, "left": "issue:43066", "reason": "Both tokenizer-related, but one is decoder type selection and the other is AutoTokenizer ignoring tokenizer.json.", "right": "issue:44462"}, {"accept": false, "left": "issue:40444", "reason": "Different areas: Qwen2.5-VL multi-image IterableDataset failure vs BEiT fast image processor label reduction bug.", "right": "issue:43526"}, {"accept": false, "left": "issue:44857", "reason": "Both precision-related, but they affect different models and different paths (LwDetr AMP crash vs SwitchTransformers/TimmWrapper bfloat16 mismatch).", "right": "issue:45072"}, {"accept": false, "left": "issue:45372", "reason": "Gemma 4 processor issues, but one is a missing dependency import and the other is a missing _tokenizer attribute in serve; not the same bug.", "right": "issue:45406"}, {"accept": false, "left": "issue:44448", "reason": "Different failures in different models: Pegasus output regression vs Qwen3.5 flash-attention generation bug.", "right": "issue:44977"}, {"accept": false, "left": "issue:43723", "reason": "Tokenizer loading in v5 is unrelated to an MPS attention correctness issue.", "right": 
"issue:44554"}, {"accept": false, "left": "issue:43606", "reason": "Different bugs: CPU offload device mismatch in bark-small vs float16 AMP crash in LwDetrImageLoss.", "right": "issue:44857"}, {"accept": false, "left": "issue:43335", "reason": "Completely different subsystems: SwitchTransformers config vs BigBird tokenizer special-token registration.", "right": "issue:43653"}], "summary": "All proposed soft matches are superficial theme overlaps (tokenizers, precision, Gemma 4, etc.) but each pair points to a different concrete failure path, model, or subsystem. None should be merged as duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:29127|issue:32090", "issue:43335|issue:43550", "issue:43066|issue:44462", "issue:40444|issue:43526", "issue:44857|issue:45072", "issue:45372|issue:45406", "issue:44448|issue:44977", "issue:43723|issue:44554", "issue:43606|issue:44857", "issue:43335|issue:43653"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6870, "estimated_input_tokens": 3307, "item_count": 17, "node_count": 17, "serialized_chars": 13227, "soft_pair_count": 11}, "cached_at": "2026-04-14T20:45:36Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "ca41e4c396d8fdfdfc9d4ba9dfaf70d3dbad332eb56ca17c2c08906bffb157d1", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:34689", "issue:35141", "issue:37428", "issue:41720", "issue:43335", "issue:43504", "issue:43550", "issue:43653", "issue:43873", "issue:44112", "issue:44493", "issue:44898", "issue:44987", "issue:45072", "issue:45083", "issue:45198", "issue:45229"], "result": {"analyst_result": {"best_issue_reason": "If a 
representative issue must be chosen, issue 43873 is the broadest and most central-looking candidate because it is open and has the most inbound activity, but it still should not be treated as the duplicate center.", "best_pr_reason": null, "canonical_issue_reason": "No single canonical issue fits the cluster: the items describe unrelated regressions rather than one underlying bug.", "canonical_pr_reason": null, "confidence": 0.93, "soft_edge_verdicts": [{"accept": false, "left": "issue:44898", "reason": "Different model families and failure modes: Perceiver image-resize interpolation vs Wav2Vec2 save/tokenization.", "right": "issue:45198"}, {"accept": false, "left": "issue:37428", "reason": "An import-time missing helper is not the same bug as a model-loading failure for a specific repo.", "right": "issue:44987"}, {"accept": false, "left": "issue:43335", "reason": "SwitchTransformers sparse-layer construction and BEiT legacy-field loading are distinct code paths.", "right": "issue:43504"}, {"accept": false, "left": "issue:43335", "reason": "A model-config bug is unrelated to a GraniteSpeech CI test about stale device overrides.", "right": "issue:44112"}, {"accept": false, "left": "issue:43653", "reason": "Tokenizer special-token registration and feature-extractor length computation are different subsystems.", "right": "issue:45083"}, {"accept": false, "left": "issue:43335", "reason": "Sparse-layer creation in SwitchTransformers is unrelated to bfloat16 dtype mismatch failures.", "right": "issue:45072"}, {"accept": false, "left": "issue:43873", "reason": "Quantization/offloading behavior and Gemma4 multi-GPU CUDA OOM are not the same underlying issue.", "right": "issue:45229"}, {"accept": false, "left": "issue:43873", "reason": "Offloading with quantization does not match the position-id key regression described here.", "right": "issue:44493"}, {"accept": false, "left": "issue:43550", "reason": "Bamba torch.compile+SDPA failure and GraniteSpeech CI stale override 
failure are unrelated.", "right": "issue:44112"}, {"accept": false, "left": "issue:34689", "reason": "Llama 3.2 vision loading regression and embedding reinit after resize_embeddings are separate bugs.", "right": "issue:35141"}, {"accept": false, "left": "issue:41720", "reason": "Qwen3 auto-device-map cudaErrorAssert and Gemma4 CUDA OOM are different runtime failures.", "right": "issue:45229"}], "summary": "This cluster is heterogeneous: the soft-similarity pairs point to different bugs across config, tokenizer, loading, quantization, CI, and inference paths. I would not merge any pair as duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44898|issue:45198", "issue:37428|issue:44987", "issue:43335|issue:43504", "issue:43335|issue:44112", "issue:43653|issue:45083", "issue:43335|issue:45072", "issue:43873|issue:45229", "issue:43873|issue:44493", "issue:43550|issue:44112", "issue:34689|issue:35141", "issue:41720|issue:45229"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6750, "estimated_input_tokens": 3247, "item_count": 17, "node_count": 17, "serialized_chars": 12985, "soft_pair_count": 10}, "cached_at": "2026-04-14T20:46:05Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "586f02377a5fbf41d63138061d2fccd85f9cb9e9a33fa6eaf22a5755a4935619", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:29942", "issue:32090", "issue:33357", "issue:35141", "issue:43065", "issue:43278", "issue:43329", "issue:43720", "issue:43976", "issue:43994", "issue:44112", "issue:44442", "issue:44448", "issue:44466", "issue:45072", "issue:45412", "issue:45440"], "result": {"analyst_result": 
{"best_issue_reason": "Issue 44466 is the strongest standalone issue from a triage perspective: specific behavior, clear reproduction context, and a well-scoped bug report.", "best_pr_reason": null, "canonical_issue_reason": "Issue 44466 is the most concrete and self-contained regression, but the cluster does not contain a clear duplicate group to canonically represent.", "canonical_pr_reason": null, "confidence": 0.94, "soft_edge_verdicts": [{"accept": false, "left": "issue:43278", "reason": "Different bugs: one is training/evaluate dtype drift for embeddings, the other is an undefined-variable error in multimodal token counting for video.", "right": "issue:43329"}, {"accept": false, "left": "issue:43976", "reason": "Not the same change: Python version compatibility for Transformers 5.1.0 is unrelated to DeepSeekV3 diverging from the remote implementation.", "right": "issue:45440"}, {"accept": false, "left": "issue:43720", "reason": "Both are CI-flavored, but they affect different models and failure modes: BitNet unpacking vs a stale GraniteSpeech device override test.", "right": "issue:44112"}, {"accept": false, "left": "issue:43065", "reason": "Different model families and symptoms: dummy Conv2d in Sam3PixelDecoder versus nonsensical outputs from SigLIP2 with AutoModel/pipeline.", "right": "issue:43994"}, {"accept": false, "left": "issue:29942", "reason": "Flash Attention test failures are unrelated to token-embedding resizing causing output embeddings to be reinitialized in post_init.", "right": "issue:35141"}, {"accept": false, "left": "issue:43065", "reason": "Both mention model internals, but one is about a dummy layer in Sam3PixelDecoder and the other is a memory-release/GC issue in RT-DETR.", "right": "issue:45412"}, {"accept": false, "left": "issue:32090", "reason": "Different root causes and subsystems: Trainer GPU broadcast NoneType error versus a MacOS bus error with a community CLIP model.", "right": "issue:33357"}, {"accept": false, "left": 
"issue:44442", "reason": "Tokenizer-loading failure and bfloat16 dtype mismatches are separate problems, not the same code-path bug.", "right": "issue:45072"}, {"accept": false, "left": "issue:44448", "reason": "Both involve model output behavior, but one is a Pegasus v4/v5 output discrepancy and the other is serialization of lm_head.weight tied to device placement.", "right": "issue:44466"}, {"accept": false, "left": "issue:43994", "reason": "SigLIP2 incorrect inference output is unrelated to RT-DETR memory not being released after deletion.", "right": "issue:45412"}], "summary": "This cluster is heterogeneous: it mixes unrelated bugs in multimodal processing, dtype/serialization, CI, tokenizer loading, and model-specific regressions. None of the soft edges look like true duplicates or the same underlying fix."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43278|issue:43329", "issue:43976|issue:45440", "issue:43720|issue:44112", "issue:43065|issue:43994", "issue:29942|issue:35141", "issue:43065|issue:45412", "issue:32090|issue:33357", "issue:44442|issue:45072", "issue:44448|issue:44466", "issue:43994|issue:45412"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6536, "estimated_input_tokens": 3140, "item_count": 17, "node_count": 17, "serialized_chars": 12557, "soft_pair_count": 9}, "cached_at": "2026-04-14T20:46:37Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "7130dde198022493c50f9e35be68f5d9400059cd75ae7ebe4570cce0dc9cd1ac", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:25251", "issue:32090", "issue:42915", "issue:42994", "issue:43257", "issue:43278", 
"issue:43381", "issue:43653", "issue:43756", "issue:44206", "issue:44442", "issue:44479", "issue:44734", "issue:44871", "issue:44977", "issue:45406", "issue:45440"], "result": {"analyst_result": {"best_issue_reason": "45440 is the best issue candidate overall because it describes a specific, actionable correctness problem rather than a broad symptom or model-specific one-off, and it has the clearest potential for an eventual fix.", "best_pr_reason": null, "canonical_issue_reason": "Issue 45440 is the strongest standalone report in the set: it is a concrete, user-facing model divergence bug with clear scope and active references. If one issue must anchor this otherwise unrelated group, it is the most representative.", "canonical_pr_reason": null, "confidence": 0.93, "soft_edge_verdicts": [{"accept": false, "left": "issue:42915", "reason": "Different failures: FineGrainedFP8Config incompatibility for Qwen3Moe vs quantized model saving. Same quantization theme, but not the same code path or fix.", "right": "issue:42994"}, {"accept": false, "left": "issue:44734", "reason": "Serving KV-cache indexing crash and Gemma4Processor missing _tokenizer are unrelated subsystems and failure modes.", "right": "issue:45406"}, {"accept": false, "left": "issue:25251", "reason": "Top-k pipeline output shape regression is unrelated to Trainer broadcasting a NoneType into _gpu_broadcast_one.", "right": "issue:32090"}, {"accept": false, "left": "issue:44871", "reason": "Different models and different bugs: eos_token_id config mismatch vs flash-attention generation regression.", "right": "issue:44977"}, {"accept": false, "left": "issue:43257", "reason": "Both mention model state handling, but one is MoE weight conversion with accelerate+deepspeed and the other is embedding dtype drift during evaluate; not the same underlying bug.", "right": "issue:43278"}, {"accept": false, "left": "issue:43653", "reason": "Both concern tokenizers, but one is a special-token registration bug causing empty 
decode, while the other is an AutoTokenizer loading failure for a different tokenizer class.", "right": "issue:44442"}, {"accept": false, "left": "issue:43278", "reason": "Eval-mode behavior is the only overlap; dtype changes in evaluation and gradient checkpointing restrictions are distinct issues.", "right": "issue:43381"}, {"accept": false, "left": "issue:44206", "reason": "Unsupported feature argument in an audio feature extractor is unrelated to the video-input regression affecting Qwen multimodal models.", "right": "issue:44479"}, {"accept": false, "left": "issue:43756", "reason": "Both are about model implementation mismatches, but they affect different architectures and different mechanisms; they are not the same concrete bug.", "right": "issue:45440"}], "summary": "These items do not form a real duplicate cluster; they span unrelated bugs in tokenizers, serving, model configs, quantization, and training/runtime behavior. All soft-similarity pairs should be rejected."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:42915|issue:42994", "issue:44734|issue:45406", "issue:25251|issue:32090", "issue:44871|issue:44977", "issue:43257|issue:43278", "issue:43653|issue:44442", "issue:43278|issue:43381", "issue:44206|issue:44479", "issue:43756|issue:45440"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6866, "estimated_input_tokens": 3305, "item_count": 18, "node_count": 18, "serialized_chars": 13218, "soft_pair_count": 9}, "cached_at": "2026-04-14T20:46:52Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "382886945bceba1fbe828cd8f3a5d99e2bca70c1aea83d9576c95195c71cd1c4", 
"prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:29127", "issue:29942", "issue:30064", "issue:32090", "issue:33290", "issue:33357", "issue:34567", "issue:39692", "issue:43334", "issue:43531", "issue:43653", "issue:43824", "issue:44279", "issue:44610", "issue:44743", "issue:45103", "issue:45372", "issue:45412"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "issue:45103", "reason": "Both are import/runtime failures, but they involve different components and different root causes: auto_docstring annotation handling vs Gemma 4 processor loading.", "right": "issue:45372"}, {"accept": false, "left": "issue:43824", "reason": "43824 is a specific missing export for Qwen2_5_VLForConditionalGeneration; 44279 is a generic dependency issue without the same concrete failure mode.", "right": "issue:44279"}, {"accept": false, "left": "issue:43653", "reason": "BigBird tokenizer special-token decoding and Qwen3 recurrent-state reset are unrelated code paths and bugs.", "right": "issue:44743"}, {"accept": false, "left": "issue:44610", "reason": "Processor input-size mismatch for OmDet-Turbo and RT-DETR memory not being released are different problems with different fixes.", "right": "issue:45412"}, {"accept": false, "left": "issue:29942", "reason": "Flash Attention 2 test failures and void segmentation map processing are unrelated issues.", "right": "issue:30064"}, {"accept": false, "left": "issue:33357", "reason": "Both touch vision-model workflows, but one is a MacOS bus error for a CLIP model and the other is a SigLIP2 documentation/example problem; not the same bug.", "right": "issue:39692"}, {"accept": false, "left": "issue:29127", "reason": "LayoutLMv3 box-validation messaging and TrainerState token-count tracking are completely different areas.", "right": "issue:34567"}, {"accept": false, "left": 
"issue:43334", "reason": "Both mention Qwen3, but one is a missing pad_token_id import/load failure for Qwen3-VL and the other is a sliding_window bug for Qwen3-MoE; different concrete problems.", "right": "issue:43531"}, {"accept": false, "left": "issue:32090", "reason": "Trainer NoneType broadcast error and DeepSpeed Adafactor OOM are both training-related, but they are distinct failure modes with different underlying causes.", "right": "issue:33290"}], "summary": "The items are a mixed bag of unrelated issues across different models, trainers, and processors. None of the soft pairs look like the same underlying bug, so there is no single canonical representative for this cluster."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:45103|issue:45372", "issue:43824|issue:44279", "issue:43653|issue:44743", "issue:44610|issue:45412", "issue:29942|issue:30064", "issue:33357|issue:39692", "issue:29127|issue:34567", "issue:43334|issue:43531", "issue:32090|issue:33290"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6802, "estimated_input_tokens": 3273, "item_count": 18, "node_count": 18, "serialized_chars": 13089, "soft_pair_count": 9}, "cached_at": "2026-04-14T20:47:31Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "ec70473b353b125658545d7fedf7dba05466f60cfcb780ca146f1806b9d883bb", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:29942", "issue:36010", "issue:43232", "issue:43262", "issue:43381", "issue:43388", "issue:43504", "issue:43526", "issue:43701", "issue:44186", "issue:44206", "issue:44387", "issue:44442", "issue:44857", "issue:44898", "issue:44977", "issue:45229", 
"issue:45245"], "result": {"analyst_result": {"best_issue_reason": "Issue 44387 is the best overall representative for routing/triage, but only as a loose cluster anchor rather than a duplicate target.", "best_pr_reason": null, "canonical_issue_reason": "Issue 44387 is the closest thing to a representative item because it has the highest discussion activity and a broad CUDA/memory symptom, but it does not \u05d1\u05d0\u05de\u05ea subsume the rest of the cluster.", "canonical_pr_reason": null, "confidence": 0.22, "soft_edge_verdicts": [{"accept": false, "left": "issue:43504", "reason": "Different bug classes: pretrained preset loading/legacy field vs float16 AMP loss crash on CUDA. No shared code path.", "right": "issue:44857"}, {"accept": false, "left": "issue:44977", "reason": "Flash-attention generation bug vs category-count limit error. These are unrelated failures in different subsystems.", "right": "issue:45245"}, {"accept": false, "left": "issue:44387", "reason": "Both mention CUDA/OOM, but one is int4 quantization reserved-memory growth and the other is Gemma4 multi-GPU inference OOM. Too different to merge.", "right": "issue:45229"}, {"accept": false, "left": "issue:43526", "reason": "BeitImageProcessorFast label reduction bug vs LayoutLMv2Tokenizer crash on NER/padding. Different components and failure modes.", "right": "issue:44186"}, {"accept": false, "left": "issue:43262", "reason": "Audio chat-template sampling-rate default vs unsupported center arg in feature extraction. 
Not the same underlying bug.", "right": "issue:44206"}, {"accept": false, "left": "issue:43381", "reason": "Gradient-checkpointing eval-mode restriction and resume-from-checkpoint key mismatch are unrelated training/checkpoint issues.", "right": "issue:43701"}, {"accept": false, "left": "issue:44442", "reason": "AutoTokenizer loading failure and Perceiver non-default-resolution failure are different model/input-path bugs.", "right": "issue:44898"}, {"accept": false, "left": "issue:29942", "reason": "Flash Attention 2 test failures vs GenerationMixin import error. Similar packaging area, but not the same concrete problem.", "right": "issue:36010"}, {"accept": false, "left": "issue:43232", "reason": "Generation kwargs update after sync_gpus vs gather_for_metrics dropping last-batch labels. Different code paths and fixes.", "right": "issue:43388"}], "summary": "This cluster is heterogeneous: the items are mostly unrelated bugs across generation, vision, audio, training, and memory/OOM paths. 
None of the soft pairs look like true duplicates, so there is no strong duplicate-canonical issue; issue 44387 is only the best representative by discussion activity and broad impact."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43504|issue:44857", "issue:44977|issue:45245", "issue:44387|issue:45229", "issue:43526|issue:44186", "issue:43262|issue:44206", "issue:43381|issue:43701", "issue:44442|issue:44898", "issue:29942|issue:36010", "issue:43232|issue:43388"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6704, "estimated_input_tokens": 3224, "item_count": 17, "node_count": 17, "serialized_chars": 12896, "soft_pair_count": 10}, "cached_at": "2026-04-14T20:48:01Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "c7ab324624448528ce0eb89d5ac86f3cc0db8fa3d0c1dcb2b78544afbdaf9b4e", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:29127", "issue:33357", "issue:35141", "issue:42915", "issue:43526", "issue:43550", "issue:43606", "issue:43701", "issue:43761", "issue:43931", "issue:43994", "issue:44188", "issue:44479", "issue:44857", "issue:45003", "issue:45290", "issue:45440"], "result": {"analyst_result": {"best_issue_reason": "Issue 45003 is the best representative because it is open, broadly scoped, and not tied to one narrow model or regression path. The other issues are more specific one-off failures and do not unify into a single concrete bug.", "best_pr_reason": null, "canonical_issue_reason": "Issue 45003 is the broadest and most general runtime-safety report in the set, and it is still open; if one issue must anchor the cluster, it is the least model-specific. 
That said, the cluster does not contain a true duplicate set.", "canonical_pr_reason": null, "confidence": 0.94, "soft_edge_verdicts": [{"accept": false, "left": "issue:29127", "reason": "Different subsystems and failures: LayoutLMv3 box-validation messaging vs CLIP community model bus error on macOS.", "right": "issue:33357"}, {"accept": false, "left": "issue:43550", "reason": "Bamba torch.compile/SDPA failure is unrelated to resume_from_checkpoint key-mapping mismatch.", "right": "issue:43701"}, {"accept": false, "left": "issue:43526", "reason": "BeitImageProcessorFast label reduction bug is unrelated to LwDetrImageLoss AMP/CUDA crash.", "right": "issue:44857"}, {"accept": false, "left": "issue:43606", "reason": "CPU offload device mismatch in bark-small is unrelated to checkpoint key mismatch on resume.", "right": "issue:43701"}, {"accept": false, "left": "issue:42915", "reason": "Both mention Qwen, but the concrete bugs differ: FineGrainedFP8Config failure vs weight-shape mismatch during model loading.", "right": "issue:43931"}, {"accept": false, "left": "issue:29127", "reason": "LayoutLMv3 error-message clarity and token-embedding reinit behavior are unrelated code paths.", "right": "issue:35141"}, {"accept": false, "left": "issue:43526", "reason": "Image processor reduce_labels bug does not match a Siglip2 AutoModel/pipeline output-quality regression.", "right": "issue:43994"}, {"accept": false, "left": "issue:45003", "reason": "sys.modules access safety issue is different from DeepseekV3MoE implementation divergence; too broad to merge.", "right": "issue:45440"}, {"accept": false, "left": "issue:44479", "reason": "Video-input regression for Qwen VL models is unrelated to chat template crashes on tool-call assistant messages.", "right": "issue:45290"}, {"accept": false, "left": "issue:43761", "reason": "CLIPVision hidden_states regression and torch.compile attention-kernel divergence are different failure modes and code paths.", "right": "issue:44188"}], 
"summary": "This cluster is heterogeneous: the issues span unrelated regressions in OCR/layout, vision models, multimodal chat templates, compilation, checkpointing, and runtime safety. None of the soft-similar pairs look like the same underlying bug or a mergeable PR-equivalent fix."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:29127|issue:33357", "issue:43550|issue:43701", "issue:43526|issue:44857", "issue:43606|issue:43701", "issue:42915|issue:43931", "issue:29127|issue:35141", "issue:43526|issue:43994", "issue:45003|issue:45440", "issue:44479|issue:45290", "issue:43761|issue:44188"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6960, "estimated_input_tokens": 3352, "item_count": 18, "node_count": 18, "serialized_chars": 13408, "soft_pair_count": 10}, "cached_at": "2026-04-14T20:48:44Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "c50a167359d60a583314302e3628625a7f7245c99bbb340372cf133cd2cbef83", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:41628", "issue:43316", "issue:43525", "issue:43575", "issue:43646", "issue:43673", "issue:43701", "issue:43931", "issue:43994", "issue:44077", "issue:44206", "issue:44387", "issue:44743", "issue:44811", "issue:44964", "issue:45216", "issue:45372", "issue:45440"], "result": {"analyst_result": {"best_issue_reason": "Issue 43646 is the most generally phrased and could serve as a loose representative, though the cluster is not cohesive enough to treat it as a true canonical duplicate.", "best_pr_reason": null, "canonical_issue_reason": "If one issue must represent the cluster, issue 43646 is the broadest umbrella-style 
regression report, but it is still too generic to truly subsume the others.", "canonical_pr_reason": null, "confidence": 0.91, "soft_edge_verdicts": [{"accept": false, "left": "issue:43316", "reason": "Both are config/API complaints, but they target different models and different missing attributes; not the same bug.", "right": "issue:43525"}, {"accept": false, "left": "issue:44743", "reason": "Different failure modes in different subsystems: recurrent cache reset vs Whisper decoding parameter handling.", "right": "issue:44811"}, {"accept": false, "left": "issue:44964", "reason": "Different models and different problems: a loading failure for Phi-4 multimodal vs a DeepSeekV3 implementation divergence.", "right": "issue:45440"}, {"accept": false, "left": "issue:43994", "reason": "Both involve vision/processor paths, but one is bad inference output and the other is a crash from an unsupported argument; not the same code-path issue.", "right": "issue:44206"}, {"accept": false, "left": "issue:43931", "reason": "Both mention Qwen checkpoints, but one is a weight-shape mismatch on load and the other is a save_pretrained regression.", "right": "issue:45216"}, {"accept": false, "left": "issue:43673", "reason": "Cache-related wording overlaps, but the affected models and concrete defects differ.", "right": "issue:44743"}, {"accept": false, "left": "issue:43646", "reason": "The first is a broad custom-model-init regression, while the second is a specific PatchTSMixer post_init policy issue.", "right": "issue:44077"}, {"accept": false, "left": "issue:43575", "reason": "One is an OOM during tensor-parallel load; the other is a checkpoint key mismatch. 
Different bugs.", "right": "issue:43701"}, {"accept": false, "left": "issue:43575", "reason": "Both mention OOM, but the causes differ materially: tensor parallel loading vs int4 quantization memory growth.", "right": "issue:44387"}, {"accept": false, "left": "issue:41628", "reason": "Both are import errors, but they involve different symbols, dependencies, and processor-loading paths.", "right": "issue:45372"}], "summary": "This cluster is mostly a similarity-only grouping of unrelated issue reports across different models, APIs, and regressions. None of the soft pairs look like the same underlying bug/change, so they should stay separate."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43316|issue:43525", "issue:44743|issue:44811", "issue:44964|issue:45440", "issue:43994|issue:44206", "issue:43931|issue:45216", "issue:43673|issue:44743", "issue:43646|issue:44077", "issue:43575|issue:43701", "issue:43575|issue:44387", "issue:41628|issue:45372"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6478, "estimated_input_tokens": 3111, "item_count": 17, "node_count": 17, "serialized_chars": 12443, "soft_pair_count": 9}, "cached_at": "2026-04-14T20:48:54Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "efbe325ad05b00c916198d2a10b515fbf0f0921db5b7b896b8e194d9fb9443db", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:29942", "issue:33290", "issue:33666", "issue:34567", "issue:34689", "issue:39692", "issue:43381", "issue:43493", "issue:43611", "issue:43653", "issue:43701", "issue:43827", "issue:43873", "issue:44077", "issue:44206", "issue:44479", "issue:44871"], "result": 
{"analyst_result": {"best_issue_reason": null, "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "issue:29942", "reason": "Different areas: Flash Attention 2 test failures vs Qwen2-VL multi-GPU training. No shared code path or concrete bug.", "right": "issue:33666"}, {"accept": false, "left": "issue:43653", "reason": "BigBirdTokenizer special-token registration vs resume_from_checkpoint key mismatch are unrelated training/tokenizer problems.", "right": "issue:43701"}, {"accept": false, "left": "issue:33290", "reason": "Adafactor + DeepSpeed OOM is a memory/optimizer issue, while num_input_tokens_seen not updating is trainer-state accounting. Different bugs.", "right": "issue:34567"}, {"accept": false, "left": "issue:43493", "reason": "SigLIP2 implementation discrepancy is a model-implementation correctness issue; docs still referencing pipeline() is documentation fallout from v5 removals.", "right": "issue:43827"}, {"accept": false, "left": "issue:34689", "reason": "Both are load-time regressions, but for different causes: Llama 3.2 Vision breakage vs base_model_prefix handling in v5.0.0.", "right": "issue:43611"}, {"accept": false, "left": "issue:44077", "reason": "PatchTSMixer post_init restriction and Qwen video input regression affect different models and different code paths.", "right": "issue:44479"}, {"accept": false, "left": "issue:39692", "reason": "SigLIP2 docs example errors are about example misuse and quantization; LasrFeatureExtractor center-arg regression is an API compatibility bug in another component.", "right": "issue:44206"}, {"accept": false, "left": "issue:43381", "reason": "Gradient checkpointing in eval mode and offloading with quantization are distinct runtime behaviors with no clear shared fix.", "right": "issue:43873"}, {"accept": false, "left": "issue:43873", "reason": "Quantization/offloading behavior is unrelated to Gemma-3 
eos_token_id configuration inconsistency.", "right": "issue:44871"}], "summary": "These issues are thematically broad but not duplicates: they span different models, training/runtime features, docs regressions, and version-specific breakages. There is no single underlying bug or change that ties the cluster together, so all soft pairs should be rejected."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:29942|issue:33666", "issue:43653|issue:43701", "issue:33290|issue:34567", "issue:43493|issue:43827", "issue:34689|issue:43611", "issue:44077|issue:44479", "issue:39692|issue:44206", "issue:43381|issue:43873", "issue:43873|issue:44871"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6964, "estimated_input_tokens": 3354, "item_count": 18, "node_count": 18, "serialized_chars": 13413, "soft_pair_count": 10}, "cached_at": "2026-04-14T20:49:19Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "900526a1df8bc75b16537aeeab085c35fb1365cbbf259dd18674f403c3c53e3e", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:33290", "issue:34689", "issue:35141", "issue:42175", "issue:43316", "issue:43540", "issue:43643", "issue:43749", "issue:43931", "issue:44162", "issue:44190", "issue:44496", "issue:44514", "issue:44792", "issue:44855", "issue:44877", "issue:44913", "issue:45137"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "issue:44190", "reason": "Different failures: local dataset loading in an example script vs unrecognized 
model/config loading for OLMo.", "right": "issue:44496"}, {"accept": false, "left": "issue:33290", "reason": "Different code paths: DeepSpeed Adafactor OOM vs embedding resize/output reinitialization after post_init.", "right": "issue:35141"}, {"accept": false, "left": "issue:44162", "reason": "Different models and symptoms: ESM2 regression vs a janus test failure for image generation.", "right": "issue:44792"}, {"accept": false, "left": "issue:33290", "reason": "Both are model/training-related, but one is an optimizer OOM and the other is a Llama 3.2 Vision model-loading break; not the same bug.", "right": "issue:34689"}, {"accept": false, "left": "issue:44855", "reason": "Completely different problems: Python 3.13 import parsing error vs DeepSpeed ZeRO3 deque underflow.", "right": "issue:45137"}, {"accept": false, "left": "issue:43540", "reason": "Both involve Qwen VL/video handling, but they fail in different APIs and model stacks; too different to merge.", "right": "issue:44514"}, {"accept": false, "left": "issue:43749", "reason": "Different underlying issues: FSDP CPU RAM efficient loading regression vs weight-shape mismatch for Qwen3-VL-30B-A3B-Instruct.", "right": "issue:43931"}, {"accept": false, "left": "issue:43643", "reason": "Different config-loading bugs: missing fields with trust_remote_code vs GPTNeoX rotary_pct not persisting on reload.", "right": "issue:44913"}, {"accept": false, "left": "issue:43316", "reason": "Different config discrepancies for different models; not the same defect.", "right": "issue:44877"}, {"accept": false, "left": "issue:42175", "reason": "Backend packaging issue for TensorFlow installation vs Gemma3TextConfig API discrepancy; unrelated.", "right": "issue:43316"}], "summary": "This cluster is not a true duplicate set: the issues cover unrelated bugs in training, model loading, config serialization, processor behavior, and environment/backend setup. 
All soft-similarity pairs should be rejected."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44190|issue:44496", "issue:33290|issue:35141", "issue:44162|issue:44792", "issue:33290|issue:34689", "issue:44855|issue:45137", "issue:43540|issue:44514", "issue:43749|issue:43931", "issue:43643|issue:44913", "issue:43316|issue:44877", "issue:42175|issue:43316"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6788, "estimated_input_tokens": 3266, "item_count": 18, "node_count": 18, "serialized_chars": 13062, "soft_pair_count": 9}, "cached_at": "2026-04-14T20:49:36Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "a57584566082ad85dd61f02b9b909eabdd137a8114795b5ac2483dd88110f224", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:33290", "issue:34689", "issue:36010", "issue:36331", "issue:41950", "issue:43065", "issue:43316", "issue:43381", "issue:43388", "issue:43404", "issue:43441", "issue:43475", "issue:43688", "issue:43716", "issue:43856", "issue:44617", "issue:45127", "issue:45200"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.98, "soft_edge_verdicts": [{"accept": false, "left": "issue:43381", "reason": "Both mention memory/efficiency, but one is about gradient checkpointing being disallowed in eval mode while the other is MoE training memory usage; different code paths and fixes.", "right": "issue:43856"}, {"accept": false, "left": "issue:43388", "reason": "Both involve loss/metrics behavior in training, but one is a gather_for_metrics label-truncation bug and the 
other is auxiliary-loss normalization; unrelated symptoms and components.", "right": "issue:43688"}, {"accept": false, "left": "issue:33290", "reason": "AdaFactor OOM in DeepSpeed and CustomTrainer.compute_loss signature breakage are separate trainer/optimizer issues with no shared failure mode.", "right": "issue:36331"}, {"accept": false, "left": "issue:43316", "reason": "Gemma3TextConfig API mismatch and Mistral-3 image preprocessor dtype mismatch are model-specific configuration vs preprocessing bugs, not the same defect.", "right": "issue:43716"}, {"accept": false, "left": "issue:43404", "reason": "Tied lm_head weights in Mistral3ForConditionalGeneration and FlashAttention failures in Ministral-3 affect different model internals and execution paths.", "right": "issue:43441"}, {"accept": false, "left": "issue:34689", "reason": "Model loading breakage for Llama 3.2 Vision and GenerationMixin import errors are distinct regressions in different subsystems.", "right": "issue:36010"}, {"accept": false, "left": "issue:41950", "reason": "A video-classification pipeline choosing image processors and a Sam3Video missing fpn_position_embeddings are different SAM/video model bugs.", "right": "issue:43475"}, {"accept": false, "left": "issue:45127", "reason": "LoRA merge collapse with extended vocabulary and Gemma 4 mm_token_type_ids defaults are unrelated model-behavior issues.", "right": "issue:45200"}, {"accept": false, "left": "issue:43065", "reason": "Dummy Conv2d in Sam3PixelDecoder and Sam3Video CUDA OOM are not the same underlying problem; one is an implementation detail, the other is a runtime memory issue.", "right": "issue:44617"}], "summary": "This cluster is heterogeneous: the items describe unrelated bugs across training, loading, model architecture, and pipeline behavior. The soft pairs only show superficial similarity (e.g. 
shared memory/error wording), not the same underlying defect, so none should be merged as duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43381|issue:43856", "issue:43388|issue:43688", "issue:33290|issue:36331", "issue:43316|issue:43716", "issue:43404|issue:43441", "issue:34689|issue:36010", "issue:41950|issue:43475", "issue:45127|issue:45200", "issue:43065|issue:44617"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7010, "estimated_input_tokens": 3377, "item_count": 18, "node_count": 18, "serialized_chars": 13506, "soft_pair_count": 10}, "cached_at": "2026-04-14T20:49:58Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "6e47538e6299020135f41f84eb2fac2483ea503910e168bb43ed106e15a284fb", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:16998", "issue:22355", "issue:25251", "issue:28282", "issue:43295", "issue:43452", "issue:43646", "issue:43761", "issue:43825", "issue:43906", "issue:44077", "issue:44242", "issue:44479", "issue:44496", "issue:44534", "issue:44871", "issue:44913", "issue:45290"], "result": {"analyst_result": {"best_issue_reason": "No issue is clearly the global representative, since the cluster is not a duplicate set and the topics do not converge on one code path.", "best_pr_reason": null, "canonical_issue_reason": "No single canonical issue fits this set; the items describe distinct bugs and questions rather than one underlying problem.", "canonical_pr_reason": null, "confidence": 0.98, "soft_edge_verdicts": [{"accept": false, "left": "issue:16998", "reason": "Different problems: DeBERTa model_max_length behavior vs missing transformers.onnx 
module.", "right": "issue:22355"}, {"accept": false, "left": "issue:43761", "reason": "Both are regressions, but one is CLIP hidden_states output and the other is a Qwen video-input failure; different code paths.", "right": "issue:44479"}, {"accept": false, "left": "issue:25251", "reason": "Pipeline top_k output-shape change is unrelated to AutoModel/PyTorch import errors.", "right": "issue:28282"}, {"accept": false, "left": "issue:43295", "reason": "One is processor/tokenizer image handling in v4.57.5; the other is a pipeline error-message regression about translation tasks.", "right": "issue:43825"}, {"accept": false, "left": "issue:43295", "reason": "Custom processor/tokenizer regression and Gemma-3 eos_token_id config mismatch are distinct bugs.", "right": "issue:44871"}, {"accept": false, "left": "issue:43452", "reason": "Both involve model loading, but gguf_file breakage and an unrecognized-model/config.json issue are different failure modes.", "right": "issue:44496"}, {"accept": false, "left": "issue:44077", "reason": "Optional post_init validation for patchtsmixer is unrelated to non-persistent buffer initialization corruption.", "right": "issue:44534"}, {"accept": false, "left": "issue:43761", "reason": "CLIPVisionModel hidden_states regression is unrelated to MoE load-balancing loss computation.", "right": "issue:44242"}, {"accept": false, "left": "issue:43646", "reason": "Custom model initialization breakage in v5 is not the same as GPTNeoXConfig rotary_pct not persisting on reload.", "right": "issue:44913"}, {"accept": false, "left": "issue:43906", "reason": "An isolated reproduction of a prior issue is not the same bug as chat template crashes on tool-call assistant messages.", "right": "issue:45290"}], "summary": "These issues are heterogeneous and do not form a duplicate cluster: they span unrelated failures in model loading, pipelines, configs, chat templating, and version-specific regressions."}, "error_kind": null, "error_message": null, 
"evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:16998|issue:22355", "issue:43761|issue:44479", "issue:25251|issue:28282", "issue:43295|issue:43825", "issue:43295|issue:44871", "issue:43452|issue:44496", "issue:44077|issue:44534", "issue:43761|issue:44242", "issue:43646|issue:44913", "issue:43906|issue:45290"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6432, "estimated_input_tokens": 3088, "item_count": 17, "node_count": 17, "serialized_chars": 12350, "soft_pair_count": 9}, "cached_at": "2026-04-14T20:50:21Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "c4ee7f9d59d14f5883b1ca6fef48f4af284755c08c4c35f993b6bddb0c8cb399", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:41720", "issue:42617", "issue:43065", "issue:43575", "issue:43742", "issue:43931", "issue:44060", "issue:44479", "issue:44521", "issue:44610", "issue:44734", "issue:44792", "issue:44933", "issue:44977", "issue:45245", "issue:45362", "issue:45381"], "result": {"analyst_result": {"best_issue_reason": "If one must be chosen, issue 44521 is the most broadly phrased and has the strongest external references, but it still does not represent the rest of the items well.", "best_pr_reason": "No pull requests are present in the cluster.", "canonical_issue_reason": "No clear canonical issue: the reports span different models and code paths, so there is no single issue that cleanly represents the cluster.", "canonical_pr_reason": "No pull requests are present in the cluster.", "confidence": 0.95, "soft_edge_verdicts": [{"accept": false, "left": "issue:43931", "reason": "Different models and failure modes: one is a Qwen3-VL weight-shape mismatch, the other is a 
Qwen3-Next tied-weights warning issue.", "right": "issue:44060"}, {"accept": false, "left": "issue:44521", "reason": "Both involve multimodal/video handling, but the bugs are different: all-zero assistant masks vs incorrect vision_position_ids in video input.", "right": "issue:45381"}, {"accept": false, "left": "issue:44933", "reason": "Unrelated symptoms and targets: a missing import from image_utils versus Qwen3.5 chat runtime crashes.", "right": "issue:45362"}, {"accept": false, "left": "issue:44479", "reason": "Different subsystems and root causes: video-input regression across Qwen VL variants versus KV-cache continuation indexing in transformers serve.", "right": "issue:44734"}, {"accept": false, "left": "issue:42617", "reason": "Both mention loading/parallelism, but one is a script failure and the other is an OOM while loading a specific model with tensor parallelism.", "right": "issue:43575"}, {"accept": false, "left": "issue:43575", "reason": "Different model families and errors: tensor-parallel OOM for Qwen2-57B versus a KeyError loading MobileLLM-125M.", "right": "issue:43742"}, {"accept": false, "left": "issue:43065", "reason": "Both are vision-model related, but one is a dummy layer choice in Sam3PixelDecoder and the other is a processor/model image-size mismatch in OmDet-Turbo.", "right": "issue:44610"}, {"accept": false, "left": "issue:44792", "reason": "No shared underlying bug: a Janus image-generation test failure versus a Qwen3.5 flash-attention generation issue.", "right": "issue:44977"}, {"accept": false, "left": "issue:41720", "reason": "Completely different errors: cudaErrorAssert on A800 for Qwen3 auto device mapping versus a category-cardinality RuntimeError.", "right": "issue:45245"}], "summary": "This cluster is heterogeneous: the items cover unrelated model-loading, multimodal preprocessing, serving, and tensor-shape bugs. 
None of the soft pairs look like the same underlying defect, so I would not merge any of them as duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43931|issue:44060", "issue:44521|issue:45381", "issue:44933|issue:45362", "issue:44479|issue:44734", "issue:42617|issue:43575", "issue:43575|issue:43742", "issue:43065|issue:44610", "issue:44792|issue:44977", "issue:41720|issue:45245"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6870, "estimated_input_tokens": 3307, "item_count": 18, "node_count": 18, "serialized_chars": 13228, "soft_pair_count": 9}, "cached_at": "2026-04-14T20:50:42Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "d99b1fa535ef875d14d81ca25b3665d1300d090564e306e42b7a4031b70bda60", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:32090", "issue:35141", "issue:41720", "issue:43388", "issue:43575", "issue:43582", "issue:43650", "issue:43827", "issue:43994", "issue:44190", "issue:44220", "issue:44464", "issue:44534", "issue:44871", "issue:44913", "issue:44964", "issue:45127", "issue:45357"], "result": {"analyst_result": {"best_issue_reason": "35141 is the best representative issue because it has a clear root-cause area, specific triggering condition, and a plausible fix surface. 
The other issues are either more generic, less actionable, or clearly unrelated to one another.", "best_pr_reason": null, "canonical_issue_reason": "Issue 35141 is the most concrete and well-scoped bug report: it describes a specific reproducible regression in `post_init` when `tie_word_embedding=False` after resizing embeddings.", "canonical_pr_reason": null, "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "issue:44464", "reason": "Different problems: chunked generation/compiled forward inconsistency vs. EOS token configuration mismatch. Shared generation context is too broad to treat as the same bug.", "right": "issue:44871"}, {"accept": false, "left": "issue:41720", "reason": "Both involve large Qwen model loading, but one is a device-map cuda assert and the other is tensor-parallel OOM. Different failure modes and likely different causes.", "right": "issue:43575"}, {"accept": false, "left": "issue:32090", "reason": "Trainer `_gpu_broadcast_one` TypeError with `NoneType` is unrelated to embedding resizing and `post_init` reinitialization behavior.", "right": "issue:35141"}, {"accept": false, "left": "issue:43827", "reason": "Docs referencing removed `pipeline()` APIs is a documentation cleanup issue; the Siglip2 issue is a model/pipeline output correctness bug. 
Not the same underlying problem.", "right": "issue:43994"}, {"accept": false, "left": "issue:44534", "reason": "Non-persistent buffer initialization junk and GPTNeoX rotary_pct reload regression are both config/state issues, but they affect different mechanisms and code paths.", "right": "issue:44913"}, {"accept": false, "left": "issue:43388", "reason": "`gather_for_metrics` label truncation and Apple Silicon allocator warmup `TypeError` are unrelated subsystems with different symptoms.", "right": "issue:43582"}, {"accept": false, "left": "issue:43650", "reason": "A vague request ('ADD THE DATA') is not the same underlying issue as `_torch_extract_fbank_features()` failing.", "right": "issue:44220"}, {"accept": false, "left": "issue:44190", "reason": "Both concern loading examples/models, but one is a local dataset issue in an example script and the other is a specific Phi-4 multimodal model load regression.", "right": "issue:44964"}, {"accept": false, "left": "issue:45127", "reason": "LoRA merging with extended vocab causing collapse is a different bug from incorrect visual encoder keys in `save_pretrained`; they affect different model components and save paths.", "right": "issue:45357"}], "summary": "This cluster is heterogeneous: it contains unrelated bugs spanning training, model loading, generation, config reload, docs, and Apple Silicon runtime errors. 
There is no strong duplicate theme across the items."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44464|issue:44871", "issue:41720|issue:43575", "issue:32090|issue:35141", "issue:43827|issue:43994", "issue:44534|issue:44913", "issue:43388|issue:43582", "issue:43650|issue:44220", "issue:44190|issue:44964", "issue:45127|issue:45357"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7102, "estimated_input_tokens": 3423, "item_count": 18, "node_count": 18, "serialized_chars": 13692, "soft_pair_count": 10}, "cached_at": "2026-04-14T20:53:34Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "e70fadd55d3b0bf6ffe246f773d2fc2c85ab2bdc366e20e120f8df8a4e1ca5ec", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:36246", "issue:41628", "issue:43116", "issue:43335", "issue:43540", "issue:43644", "issue:43761", "issue:43976", "issue:44077", "issue:44079", "issue:44190", "issue:44355", "issue:44496", "issue:44734", "issue:44743", "issue:44855", "issue:44913", "issue:45372"], "result": {"analyst_result": {"best_issue_reason": "issue:45372 is the best representative issue because it is recent, open, and has a clearly scoped regression with downstream impact, even though it is not a duplicate hub.", "best_pr_reason": null, "canonical_issue_reason": "No true canonical duplicate stands out: the issues span different models, scripts, regressions, and import/config failures rather than one shared bug.", "canonical_pr_reason": null, "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "issue:43761", "reason": "Different code paths and models: CLIPVisionModel hidden_states 
regression vs Qwen3OmniMoe recurrent-state reset with cache.", "right": "issue:44743"}, {"accept": false, "left": "issue:44355", "reason": "Both are import-related, but one is a generic compiled-Python failure and the other is a Python 3.13 torch.jit parsing/IndentationError in DeBERTaV2.", "right": "issue:44855"}, {"accept": false, "left": "issue:43335", "reason": "Unrelated bugs: SwitchTransformers sparse-layer config handling vs DeBERTaV2 import failure on Python 3.13.", "right": "issue:44855"}, {"accept": false, "left": "issue:43976", "reason": "A broad Python-version compatibility regression is not the same concrete bug as the DeBERTaV2 import parser issue.", "right": "issue:44855"}, {"accept": false, "left": "issue:43540", "reason": "Qwen3OmniMoe video-input validation and transformers serve KV-cache tensor indexing are different failures.", "right": "issue:44734"}, {"accept": false, "left": "issue:44496", "reason": "Model config resolution failure and mistral_common ReasoningEffort import breakage are distinct causes and fixes.", "right": "issue:45372"}, {"accept": false, "left": "issue:43644", "reason": "Global non-persistent buffer initialization bug vs GPTNeoXConfig rotary_pct persistence bug; not the same issue.", "right": "issue:44913"}, {"accept": false, "left": "issue:44077", "reason": "Different subsystems: patchtsmixer post_init validation vs ModelOutput key assignment logic.", "right": "issue:44079"}, {"accept": false, "left": "issue:43116", "reason": "Different example scripts and failures: multi-label classification output vs local dataset loading in image classification.", "right": "issue:44190"}, {"accept": false, "left": "issue:36246", "reason": "Both are import errors, but they concern different symbols and failure modes; not the same concrete missing-export bug.", "right": "issue:41628"}], "summary": "This cluster is not a duplicate set; it groups a variety of unrelated Transformers bugs and regressions. 
None of the soft-similarity pairs look like the same underlying issue, so all soft edges should be rejected."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43761|issue:44743", "issue:44355|issue:44855", "issue:43335|issue:44855", "issue:43976|issue:44855", "issue:43540|issue:44734", "issue:44496|issue:45372", "issue:43644|issue:44913", "issue:44077|issue:44079", "issue:43116|issue:44190", "issue:36246|issue:41628"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6624, "estimated_input_tokens": 3184, "item_count": 17, "node_count": 17, "serialized_chars": 12735, "soft_pair_count": 9}, "cached_at": "2026-04-14T20:53:54Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "217235df961f0f03f43911282b18ea687473b4426c673c344a2d6f8f28460df9", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:36246", "issue:38175", "issue:38617", "issue:39692", "issue:41950", "issue:42175", "issue:43065", "issue:43295", "issue:43335", "issue:43404", "issue:44016", "issue:44190", "issue:44279", "issue:44871", "issue:45216", "issue:45245", "issue:45276"], "result": {"analyst_result": {"best_issue_reason": "No issue is a good global canonical candidate; the set spans unrelated reports rather than one bug resurfacing in multiple places.", "best_pr_reason": null, "canonical_issue_reason": "No single canonical issue stands out because the items are heterogeneous and do not share the same underlying defect.", "canonical_pr_reason": null, "confidence": 0.98, "soft_edge_verdicts": [{"accept": false, "left": "issue:41950", "reason": "Video-classification pipeline processor lookup is unrelated to a notebook 
syntax error.", "right": "issue:44016"}, {"accept": false, "left": "issue:43295", "reason": "Different model-specific regressions: processor/tokenizer behavior vs Gemma4 embedding resizing.", "right": "issue:45276"}, {"accept": false, "left": "issue:43404", "reason": "Different concrete bugs affecting different models and code paths: tied lm_head weights vs save_pretrained checkpoint content.", "right": "issue:45216"}, {"accept": false, "left": "issue:36246", "reason": "Separate import errors for different symbols in different modules.", "right": "issue:38617"}, {"accept": false, "left": "issue:42175", "reason": "One is about pip extras/backends, the other is a vague dependency complaint without the same specific failure.", "right": "issue:44279"}, {"accept": false, "left": "issue:39692", "reason": "SigLIP2 documentation/example issues are not the same as a local dataset loading failure in a training script.", "right": "issue:44190"}, {"accept": false, "left": "issue:43335", "reason": "SwitchTransformers sparse-layer config bug is unrelated to dataset loading in image-classification no-trainer.", "right": "issue:44190"}, {"accept": false, "left": "issue:38175", "reason": "Unexpected probabilities in SigLIP2 and a dummy Conv2d in Sam3PixelDecoder are unrelated model bugs.", "right": "issue:43065"}, {"accept": false, "left": "issue:44871", "reason": "Gemma-3 eos token configuration mismatch is unrelated to the categories limit runtime error.", "right": "issue:45245"}], "summary": "These items are not a duplicate cluster: they describe unrelated bugs across different models, pipelines, notebooks, configs, and dependency/setup issues. 
All soft-edge pairs should be rejected."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:41950|issue:44016", "issue:43295|issue:45276", "issue:43404|issue:45216", "issue:36246|issue:38617", "issue:42175|issue:44279", "issue:39692|issue:44190", "issue:43335|issue:44190", "issue:38175|issue:43065", "issue:44871|issue:45245"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6510, "estimated_input_tokens": 3127, "item_count": 17, "node_count": 17, "serialized_chars": 12507, "soft_pair_count": 9}, "cached_at": "2026-04-14T20:54:21Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "fef536c76b81b46eff37c0beb95475c6f0815cbb777ac02b9f5735b69f6791fe", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:42907", "issue:43299", "issue:43441", "issue:43479", "issue:43854", "issue:44016", "issue:44112", "issue:44186", "issue:44220", "issue:44246", "issue:44466", "issue:44734", "issue:44792", "issue:44913", "issue:45003", "issue:45216", "issue:45375"], "result": {"analyst_result": {"best_issue_reason": "Issue 45216 is the strongest standalone issue to anchor on because it is precise, reproducible, and actionable; the other items are either narrower, more test-specific, or describe unrelated model/config paths.", "best_pr_reason": null, "canonical_issue_reason": "Issue 45216 is the clearest concrete regression report: it names the model family, the failing API (`save_pretrained`), and a specific version threshold, making it the most representative issue here.", "canonical_pr_reason": null, "confidence": 0.94, "soft_edge_verdicts": [{"accept": false, "left": "issue:44186", "reason": "Both are 
Transformers bugs, but one is a tokenizer/NER padding crash and the other is a serve/KV-cache indexing crash; different components and failure paths.", "right": "issue:44734"}, {"accept": false, "left": "issue:43479", "reason": "Both involve config handling, but they affect different model families and different fields (`None` defaults vs a missing `deepstack_visual_indexes` field); not the same underlying bug.", "right": "issue:45375"}, {"accept": false, "left": "issue:44246", "reason": "Import slowness and unsafe `sys.modules` access are related only loosely; the reports do not describe the same concrete defect or fix.", "right": "issue:45003"}, {"accept": false, "left": "issue:44016", "reason": "A notebook syntax error and an audio feature extraction issue are unrelated.", "right": "issue:44220"}, {"accept": false, "left": "issue:44466", "reason": "Both concern serialization/reload behavior, but one is about `lm_head.weight` tied-weight saving and the other about `rotary_pct` config persistence; different bugs.", "right": "issue:44913"}, {"accept": false, "left": "issue:44112", "reason": "Both are failing tests, but they target different models and different assertions; this is not the same bug.", "right": "issue:44792"}, {"accept": false, "left": "issue:42907", "reason": "Both mention Ministral/Devstral, but one is about saving dequantized weights and the other about FlashAttention loading; separate code paths.", "right": "issue:43441"}, {"accept": false, "left": "issue:43854", "reason": "Different model families and different failing scenarios; only shared symptom is a test failure.", "right": "issue:44792"}, {"accept": false, "left": "issue:43299", "reason": "Both are Qwen-related version regressions, but one breaks model loading while the other corrupts saved checkpoints; not the same underlying issue.", "right": "issue:45216"}], "summary": "The candidates are mostly superficial matches by subsystem or bug shape, but they describe different models, code 
paths, or failure modes. I would not merge any of the soft pairs."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44186|issue:44734", "issue:43479|issue:45375", "issue:44246|issue:45003", "issue:44016|issue:44220", "issue:44466|issue:44913", "issue:44112|issue:44792", "issue:42907|issue:43441", "issue:43854|issue:44792", "issue:43299|issue:45216"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6544, "estimated_input_tokens": 3144, "item_count": 17, "node_count": 17, "serialized_chars": 12576, "soft_pair_count": 9}, "cached_at": "2026-04-14T20:54:59Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "df8183d71849d0360f44618ccd936fd50cbdf04d6ef07a361283a4a529c9f20c", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:43295", "issue:43299", "issue:43643", "issue:43673", "issue:43824", "issue:43976", "issue:44016", "issue:44062", "issue:44112", "issue:44279", "issue:44336", "issue:44496", "issue:44933", "issue:44964", "issue:44987", "issue:45335", "issue:45357"], "result": {"analyst_result": {"best_issue_reason": "issue:44964 is the most concrete, user-facing latest-transformers model-loading regression and the best representative issue in this mixed cluster.", "best_pr_reason": null, "canonical_issue_reason": "No clear canonical issue: this set spans unrelated regressions (model loading, config parsing, imports, CI, docs, and environment compatibility) rather than one duplicate bug.", "canonical_pr_reason": null, "confidence": 0.92, "soft_edge_verdicts": [{"accept": false, "left": "issue:43295", "reason": "Different failures: processor/tokenizer multimodal regression vs 
resize_token_embeddings not propagating to decoder.embed_tokens.", "right": "issue:45335"}, {"accept": false, "left": "issue:43673", "reason": "Unrelated code paths: chunked_prefill cache handling vs ANSI color output in loading_report.", "right": "issue:44336"}, {"accept": false, "left": "issue:44016", "reason": "Notebook syntax error and AddedToken keyword-arg collision are distinct bugs.", "right": "issue:44062"}, {"accept": false, "left": "issue:43643", "reason": "Both involve loading/config, but one is trust_remote_code field loss and the other is missing model_type for an unrecognized model.", "right": "issue:44496"}, {"accept": false, "left": "issue:43824", "reason": "Import failures in different areas: missing Qwen2_5_VL class export vs a non-existent image_utils import.", "right": "issue:44933"}, {"accept": false, "left": "issue:43976", "reason": "Python-version compatibility issue vs a model-loading failure; not the same bug.", "right": "issue:44987"}, {"accept": false, "left": "issue:44112", "reason": "CI stale-test failure is unrelated to the Phi-4 multimodal loading regression.", "right": "issue:44964"}, {"accept": false, "left": "issue:43976", "reason": "General dependency complaint and Python 3.9/3.10 compatibility problem are not the same underlying issue.", "right": "issue:44279"}, {"accept": false, "left": "issue:43299", "reason": "Both are Qwen multimodal reports, but one is model loading and the other is save_pretrained key serialization; different code paths and fixes.", "right": "issue:45357"}], "summary": "Mixed cluster of transformer regression reports; the pairs are broadly related by theme but do not look like the same underlying bug, so no soft edges are accepted."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43295|issue:45335", "issue:43673|issue:44336", "issue:44016|issue:44062", "issue:43643|issue:44496", "issue:43824|issue:44933", 
"issue:43976|issue:44987", "issue:44112|issue:44964", "issue:43976|issue:44279", "issue:43299|issue:45357"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6806, "estimated_input_tokens": 3275, "item_count": 17, "node_count": 17, "serialized_chars": 13098, "soft_pair_count": 10}, "cached_at": "2026-04-14T20:55:26Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "169bb639b78d78cd30b86cc5ece9d5b6c326d8b26efe6f08d387d1b8b20857c1", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:43526", "issue:43577", "issue:43901", "issue:44075", "issue:44242", "issue:44297", "issue:44351", "issue:44368", "issue:44462", "issue:44496", "issue:44521", "issue:45042", "issue:45081", "issue:45200", "issue:45216", "issue:45278", "issue:45325"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.93, "soft_edge_verdicts": [{"accept": false, "left": "issue:44075", "reason": "Different problems: SGD optimizer arguments being ignored vs a Qwen tie_word_embeddings warning during LoRA fine-tuning. Shared training context only, not the same bug.", "right": "issue:44368"}, {"accept": false, "left": "issue:43526", "reason": "BeitImageProcessorFast label reduction bug vs TextClassificationPipeline documentation mismatch. One is a runtime processing bug, the other is a docs/behavior note.", "right": "issue:43901"}, {"accept": false, "left": "issue:44496", "reason": "Unrecognized model config import error vs PIL backend image processors requiring torchvision. 
Both are loading-related but clearly distinct failure modes.", "right": "issue:45042"}, {"accept": false, "left": "issue:44462", "reason": "AutoTokenizer ignoring tokenizer.json vs a Mistral regex patch crash on backend_tokenizer access. Both involve tokenizers, but not the same code path or defect.", "right": "issue:45081"}, {"accept": false, "left": "issue:44521", "reason": "All-zero assistant masks in multimodal chat templating vs Qwen2.5-VL rope index temporal position scaling. Different multimodal internals and different outputs affected.", "right": "issue:45325"}, {"accept": false, "left": "issue:44242", "reason": "Missing load balancing loss when output_router_logits=False vs mm_token_type_ids required for text-only fine-tuning. Different model features and different broken behavior.", "right": "issue:45200"}, {"accept": false, "left": "issue:43901", "reason": "Docs mentioning return_all_scores vs an unrecognized model/config loading error. No shared bug or fix path.", "right": "issue:44496"}, {"accept": false, "left": "issue:43577", "reason": "BLIP2 dtype staying float32 when loading vs Qwen3.5 save_pretrained regression. Completely separate models and code paths.", "right": "issue:45216"}, {"accept": false, "left": "issue:44351", "reason": "Specific HybridCache import failure vs broad import errors after upgrading 4.57.0 to 5.5.0. The symptoms are too different to be the same issue.", "right": "issue:45278"}, {"accept": false, "left": "issue:44297", "reason": "Wrong tokenizer_class saved in tokenizer_config.json vs AutoTokenizer ignoring tokenizer.json. Both touch tokenizer persistence, but one is save-side metadata and the other is load-side selection logic.", "right": "issue:44462"}], "summary": "These items are a collection of unrelated Hugging Face Transformers bug reports and one docs issue; none of the soft pairs look like the same underlying defect, so all candidate merges should be rejected. 
There is no clear canonical issue or PR for the set as a whole."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44075|issue:44368", "issue:43526|issue:43901", "issue:44496|issue:45042", "issue:44462|issue:45081", "issue:44521|issue:45325", "issue:44242|issue:45200", "issue:43901|issue:44496", "issue:43577|issue:45216", "issue:44351|issue:45278", "issue:44297|issue:44462"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6864, "estimated_input_tokens": 3304, "item_count": 18, "node_count": 18, "serialized_chars": 13214, "soft_pair_count": 10}, "cached_at": "2026-04-14T20:55:49Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "ce2d6ec592648060b04cde78471f2bd0903804e745c561832b30537b083f2874", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:38175", "issue:41950", "issue:43479", "issue:43525", "issue:43650", "issue:43854", "issue:43976", "issue:44279", "issue:44462", "issue:44479", "issue:44496", "issue:44617", "issue:44779", "issue:44877", "issue:44964", "issue:45020", "issue:45245", "issue:45405"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "issue:43854", "reason": "Both are model-loading failures, but they affect different models and likely different code paths/symptoms.", "right": "issue:44964"}, {"accept": false, "left": "issue:44279", "reason": "These are separate dependency complaints: a generic transformers dependency issue vs an unreleased MIN_PEFT_VERSION bump.", "right": "issue:45405"}, {"accept": 
false, "left": "issue:44462", "reason": "Both involve tokenizers, but one is about ignoring tokenizer.json while the other is a DeepSeek tokenization regression.", "right": "issue:44779"}, {"accept": false, "left": "issue:44479", "reason": "Completely different failures: video-input regression in Qwen VL models vs a category-count runtime limit error.", "right": "issue:45245"}, {"accept": false, "left": "issue:43976", "reason": "Different version-compatibility problems affecting different dependencies (Python support vs PEFT version).", "right": "issue:45405"}, {"accept": false, "left": "issue:41950", "reason": "Video-classification processor lookup and a vague data request are not the same bug.", "right": "issue:43650"}, {"accept": false, "left": "issue:41950", "reason": "The first is a video-classification processor lookup bug; the second is broad remote_code breakage, not the same concrete issue.", "right": "issue:45020"}, {"accept": false, "left": "issue:43479", "reason": "Both mention configs, but one is bad default vision/audio init and the other is a missing pad_token_id attribute.", "right": "issue:43525"}, {"accept": false, "left": "issue:38175", "reason": "Unrelated symptoms: zero probabilities in siglip2 versus CUDA out-of-memory in Sam3Video.", "right": "issue:44617"}, {"accept": false, "left": "issue:44496", "reason": "Both concern model/config loading, but they are different errors and not clearly the same underlying fix.", "right": "issue:44877"}], "summary": "This cluster is heterogeneous: it mixes unrelated model-loading errors, tokenizer regressions, config/default bugs, dependency/version issues, and video pipeline problems. 
None of the soft pairs look like the same underlying bug, so all soft edges should be rejected."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43854|issue:44964", "issue:44279|issue:45405", "issue:44462|issue:44779", "issue:44479|issue:45245", "issue:43976|issue:45405", "issue:41950|issue:43650", "issue:41950|issue:45020", "issue:43479|issue:43525", "issue:38175|issue:44617", "issue:44496|issue:44877"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6754, "estimated_input_tokens": 3249, "item_count": 18, "node_count": 18, "serialized_chars": 12995, "soft_pair_count": 9}, "cached_at": "2026-04-14T20:56:20Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "e708bb142e3d99e94e08510ff63b5d6d84b1a361426694d410b38dc4b72ca991", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:39692", "issue:42994", "issue:43295", "issue:43299", "issue:43317", "issue:43441", "issue:43450", "issue:43650", "issue:43723", "issue:43854", "issue:43901", "issue:44016", "issue:44162", "issue:44279", "issue:44291", "issue:44683", "issue:44779", "issue:45290"], "result": {"analyst_result": {"best_issue_reason": "If a single anchor is needed, issue 43295 is the clearest standalone report: it describes a concrete v4.57.5 regression, specific API breakage, and a focused reproduction path. 
But it is not a duplicate of the other issues in this set.", "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.98, "soft_edge_verdicts": [{"accept": false, "left": "issue:43295", "reason": "Both mention tokenizer regressions in v5, but one is about processor.tokenizer / image-to-tokenizer behavior and the other is Deepseek tokenization correctness; different models, symptoms, and code paths.", "right": "issue:44779"}, {"accept": false, "left": "issue:43299", "reason": "Both are model-loading failures, but Qwen3VL MoE loading and GLM-4.7-Flash unit-test loading are distinct models with no concrete shared failure mode.", "right": "issue:43854"}, {"accept": false, "left": "issue:43441", "reason": "FlashAttention is the only overlap. One is a Ministral-3 model-specific regression; the other is a torch>=2.9 compiled flex_attention failure. Different kernels and root causes.", "right": "issue:44683"}, {"accept": false, "left": "issue:39692", "reason": "One is a SigLIP2 docs example with model/processor mismatch and quantization failure; the other is a TextClassificationPipeline docs mismatch about return_all_scores. 
Both are documentation issues but not the same bug.", "right": "issue:43901"}, {"accept": false, "left": "issue:43450", "reason": "Video processor batched-shape bug versus a generic dependency issue with transformers; no shared code path or symptom.", "right": "issue:44279"}, {"accept": false, "left": "issue:43723", "reason": "Tokenizer loading via AutoTokenizer in v5 and ESM2 being broken are both tokenizer-related at a high level, but they refer to different models and different breakages.", "right": "issue:44162"}, {"accept": false, "left": "issue:43650", "reason": "The title/content signal for 43650 is too generic to match the specific chat-template crash in 45290; no evidence of the same underlying bug.", "right": "issue:45290"}, {"accept": false, "left": "issue:42994", "reason": "Quantized model saving failure and dequantized model loading with device_map=auto offload are related to quantization broadly, but they are separate operations with different failure points.", "right": "issue:43317"}, {"accept": false, "left": "issue:44016", "reason": "A notebook syntax error and an init_empty_weights TypeError are unrelated failure types; no shared code path.", "right": "issue:44291"}], "summary": "These items are not a single duplicate cluster; they span unrelated bugs in tokenizers, model loading, attention kernels, docs, notebooks, and quantization/offload behavior. 
None of the soft pairs look like the same underlying issue or change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43295|issue:44779", "issue:43299|issue:43854", "issue:43441|issue:44683", "issue:39692|issue:43901", "issue:43450|issue:44279", "issue:43723|issue:44162", "issue:43650|issue:45290", "issue:42994|issue:43317", "issue:44016|issue:44291"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6844, "estimated_input_tokens": 3294, "item_count": 18, "node_count": 18, "serialized_chars": 13175, "soft_pair_count": 9}, "cached_at": "2026-04-14T21:00:31Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "fd5b9e6dca681c40a59a2709fe6825d6729e47c5cae2a4e74a76e1030c47d2d2", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:43065", "issue:43122", "issue:43262", "issue:43550", "issue:43650", "issue:43673", "issue:43761", "issue:44261", "issue:44263", "issue:44466", "issue:44521", "issue:44671", "issue:44779", "issue:44898", "issue:44964", "issue:45071", "issue:45276", "issue:45372"], "result": {"analyst_result": {"best_issue_reason": "Issue #44521 is the strongest standalone report: it has a specific multimodal `apply_chat_template()` failure, clear impact, and active discussion/inbound references.", "best_pr_reason": null, "canonical_issue_reason": "No single canonical duplicate emerges; the issues describe distinct failures rather than the same underlying bug.", "canonical_pr_reason": null, "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "issue:43550", "reason": "Different problems and code paths: torch.compile/SDPA failure in Bamba vs model-loading failure 
for Phi-4 multimodal.", "right": "issue:44964"}, {"accept": false, "left": "issue:43065", "reason": "Both are vision-related but not the same bug: dummy Conv2d in Sam3PixelDecoder vs Perceiver non-default-resolution interpolation failure.", "right": "issue:44898"}, {"accept": false, "left": "issue:44261", "reason": "Distinct defects: missing `rms_norm_eps` in MLA layernorm precision handling vs `torch.split()` return values in GlmMoeDsaIndexer.", "right": "issue:44263"}, {"accept": false, "left": "issue:43262", "reason": "Same API name, different failures: audio processor sampling-rate default mismatch vs multimodal assistant mask generation returning all zeros.", "right": "issue:44521"}, {"accept": false, "left": "issue:44466", "reason": "Different underlying issues: `lm_head.weight` serialization/tied-weights behavior vs Gemma4 embedding-resize propagation across layers.", "right": "issue:45276"}, {"accept": false, "left": "issue:44671", "reason": "Different models and symptoms: CamemBERT masked-LM prediction regression vs Deepseek tokenizer regression.", "right": "issue:44779"}, {"accept": false, "left": "issue:43122", "reason": "Tokenizer regression vs unrelated placeholder/data request; no shared bug mechanism.", "right": "issue:43650"}, {"accept": false, "left": "issue:45071", "reason": "Unrelated failures: `PretrainedConfig` type checking breakage vs Gemma 4 processor import error from `mistral_common` dependency.", "right": "issue:45372"}, {"accept": false, "left": "issue:43673", "reason": "Different code paths and symptoms: generation cache missing during chunked prefill vs CLIPVisionModel hidden_states being None despite request.", "right": "issue:43761"}], "summary": "No duplicate pair looks strong enough to merge: the items span unrelated bugs across tokenization, vision, audio, generation, config typing, and model-loading paths. 
All soft edges are rejected."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43550|issue:44964", "issue:43065|issue:44898", "issue:44261|issue:44263", "issue:43262|issue:44521", "issue:44466|issue:45276", "issue:44671|issue:44779", "issue:43122|issue:43650", "issue:45071|issue:45372", "issue:43673|issue:43761"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6596, "estimated_input_tokens": 3170, "item_count": 17, "node_count": 17, "serialized_chars": 12679, "soft_pair_count": 10}, "cached_at": "2026-04-14T21:00:49Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "19ca581ad7773de890a4148faa19c935d6a661dfd430d81432b757527403ccb5", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:42175", "issue:43295", "issue:43352", "issue:43475", "issue:43525", "issue:43824", "issue:43881", "issue:43901", "issue:43976", "issue:43994", "issue:44016", "issue:44246", "issue:44617", "issue:44623", "issue:44749", "issue:45250", "issue:45397"], "result": {"analyst_result": {"best_issue_reason": "issue:43824 is the strongest standalone issue report here: it has clear user impact, multiple references, and a specific import failure, but it is not a canonical duplicate target for the rest.", "best_pr_reason": null, "canonical_issue_reason": "No single canonical issue fits this cluster: the items describe different products, model families, and failure modes rather than one duplicate thread.", "canonical_pr_reason": null, "confidence": 0.92, "soft_edge_verdicts": [{"accept": false, "left": "issue:43352", "reason": "Both mention Flash Attention 2.0, but one is a Nemotron model-specific 
unsupported-feature error and the other is a generic Flash Attention topic; not the same bug.", "right": "issue:45250"}, {"accept": false, "left": "issue:42175", "reason": "TensorFlow backend packaging with torch extras is unrelated to a Python 3.9/3.10 compatibility/install support issue.", "right": "issue:43976"}, {"accept": false, "left": "issue:44623", "reason": "Processor save_pretrained missing files is a serialization bug, while the Chinese issue is about inference/filtering slowing down after upgrade; different code paths.", "right": "issue:44749"}, {"accept": false, "left": "issue:43901", "reason": "Docs mismatch for return_all_scores is not the same as SigLIP2 producing bad outputs with AutoModel/pipeline.", "right": "issue:43994"}, {"accept": false, "left": "issue:44623", "reason": "Both involve processor/model saving or loading, but one is missing processor files and the other is a gemma-4 zero3 from_pretrained issue; distinct problems.", "right": "issue:45397"}, {"accept": false, "left": "issue:43295", "reason": "A processor.tokenizer regression is unrelated to Sam3Video CUDA out-of-memory.", "right": "issue:44617"}, {"accept": false, "left": "issue:43824", "reason": "Missing Qwen2_5_VL import and Python version incompatibility are separate release/package problems.", "right": "issue:43976"}, {"accept": false, "left": "issue:44246", "reason": "Slow import times are unrelated to a video model running out of memory.", "right": "issue:44617"}, {"accept": false, "left": "issue:43475", "reason": "A missing Sam3VisionEncoderOutput attribute is a runtime model API bug, while the notebook syntax error is a docs/notebook issue.", "right": "issue:44016"}, {"accept": false, "left": "issue:43525", "reason": "Llama4Config missing pad_token_id and glm-4v-9b loading failure are different model-specific attribute/loading errors.", "right": "issue:43881"}], "summary": "These are mostly unrelated issue reports spanning backend packaging, Flash Attention, multimodal 
processor regressions, model-specific load failures, docs, and performance. None of the soft pairs look like the same underlying bug or change, so they should all remain separate."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43352|issue:45250", "issue:42175|issue:43976", "issue:44623|issue:44749", "issue:43901|issue:43994", "issue:44623|issue:45397", "issue:43295|issue:44617", "issue:43824|issue:43976", "issue:44246|issue:44617", "issue:43475|issue:44016", "issue:43525|issue:43881"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6450, "estimated_input_tokens": 3097, "item_count": 17, "node_count": 17, "serialized_chars": 12385, "soft_pair_count": 9}, "cached_at": "2026-04-14T21:01:07Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "5bb65fd47afc730aed473653770b2b739058a8552c5613ff782457137b9d477b", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:41628", "issue:41950", "issue:43901", "issue:43976", "issue:44188", "issue:44220", "issue:44246", "issue:44297", "issue:44336", "issue:44361", "issue:44373", "issue:44462", "issue:44589", "issue:44749", "issue:45356", "issue:45362", "issue:45397"], "result": {"analyst_result": {"best_issue_reason": "No global best issue is suitable as an umbrella canonical because none of these reports plausibly subsumes the others.", "best_pr_reason": null, "canonical_issue_reason": "No single canonical issue stands out; the set spans unrelated topics (tokenizers, pipelines, import/runtime errors, docs, and performance).", "canonical_pr_reason": null, "confidence": 0.94, "soft_edge_verdicts": [{"accept": false, "left": "issue:44589", "reason": 
"Both involve failures during model/tokenizer use, but one is a missing Float8 storage type and the other is a Qwen chat crash; different code paths and likely different root causes.", "right": "issue:45362"}, {"accept": false, "left": "issue:43901", "reason": "Docs/behavior mismatch for TextClassificationPipeline is unrelated to intermittent slow import time.", "right": "issue:44246"}, {"accept": false, "left": "issue:44188", "reason": "One is a torch.compile attention-kernel divergence bug; the other is terminal ANSI formatting in loading_report. Same library, different functionality.", "right": "issue:44336"}, {"accept": false, "left": "issue:41628", "reason": "Missing AutoImageProcessor import and Python version compatibility regression are not the same defect.", "right": "issue:43976"}, {"accept": false, "left": "issue:44462", "reason": "Both are tokenizer-related, but one is AutoTokenizer ignoring tokenizer.json and the other is a Kimi-K2.5 codec/regression issue; not the same underlying bug.", "right": "issue:45356"}, {"accept": false, "left": "issue:41950", "reason": "Video-classification pipeline image-processor lookup and _torch_extract_fbank_features() are unrelated pipeline vs audio feature-extraction problems.", "right": "issue:44220"}, {"accept": false, "left": "issue:44297", "reason": "Tokenizer class mismatch in save_pretrained and Gemma-4 zero3 from_pretrained crashes are different failure modes with different likely fixes.", "right": "issue:45397"}, {"accept": false, "left": "issue:44361", "reason": "MLukeTokenizer AttributeError and a performance slowdown after version upgrade are not the same bug.", "right": "issue:44749"}, {"accept": false, "left": "issue:44336", "reason": "ANSI color output in loading_report and a wrong docstring for position_ids are clearly separate documentation/UX issues.", "right": "issue:44373"}], "summary": "These items are a heterogeneous mix of unrelated bugs, regressions, docs issues, and performance reports. 
All proposed soft pairs look superficially similar by subsystem or wording, but they do not appear to describe the same underlying bug/change, so they should be rejected as duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44589|issue:45362", "issue:43901|issue:44246", "issue:44188|issue:44336", "issue:41628|issue:43976", "issue:44462|issue:45356", "issue:41950|issue:44220", "issue:44297|issue:45397", "issue:44361|issue:44749", "issue:44336|issue:44373"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6810, "estimated_input_tokens": 3277, "item_count": 18, "node_count": 18, "serialized_chars": 13106, "soft_pair_count": 9}, "cached_at": "2026-04-14T21:01:40Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "9bc9c74552ed2acff185a6401f4811e9604830c3b91f903afced120718f6c551", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:36246", "issue:42491", "issue:43299", "issue:43526", "issue:43531", "issue:43673", "issue:43994", "issue:44246", "issue:44295", "issue:44336", "issue:44351", "issue:44485", "issue:44496", "issue:44561", "issue:45003", "issue:45276", "issue:45362", "issue:45397"], "result": {"analyst_result": {"best_issue_reason": "If one issue must represent the cluster, 43299 is the strongest concrete example of the recurring v5 regression theme (Qwen3/Qwen3-MoE loading breakage) and is more specific than the broader or more generic reports.", "best_pr_reason": null, "canonical_issue_reason": "No clear canonical duplicate stands out because the items span different bugs and code paths; the closest shared theme is v5/model-loading breakage, but not a single 
underlying issue.", "canonical_pr_reason": null, "confidence": 0.82, "soft_edge_verdicts": [{"accept": false, "left": "issue:42491", "reason": "Both mention Qwen3/MoE and hf5.x/v5 breakage, but one is about LoRA compatibility on qwen3_moe and the other about Qwen3VL MoE loading; different failure modes and likely different fixes.", "right": "issue:43299"}, {"accept": false, "left": "issue:44246", "reason": "Unrelated bugs: import slowness vs. an unrecognized-model/config issue for a specific checkpoint.", "right": "issue:44496"}, {"accept": false, "left": "issue:36246", "reason": "Both are import errors, but they involve different missing exports and different model subsystems; not the same bug.", "right": "issue:44351"}, {"accept": false, "left": "issue:43531", "reason": "Qwen3-MoE sliding_window behavior and Qwen3.5 chat crashes are different code paths and symptoms.", "right": "issue:45362"}, {"accept": false, "left": "issue:44295", "reason": "A position_ids buffer read error is not the same as gemma-4 zero3/from_pretrained behavior.", "right": "issue:45397"}, {"accept": false, "left": "issue:43526", "reason": "BeitImageProcessorFast reduce_labels bug and unsafe sys.modules access in modeling_utils are unrelated.", "right": "issue:45003"}, {"accept": false, "left": "issue:43994", "reason": "SigLIP2 nonsensical outputs via AutoModel/pipeline and gemma4 embedding resizing are separate model-specific issues.", "right": "issue:45276"}, {"accept": false, "left": "issue:44336", "reason": "ANSI code leakage in loading_report is unrelated to GLM-5 RoPE implementation.", "right": "issue:44485"}, {"accept": false, "left": "issue:43673", "reason": "Both are v5 regressions, but one is about chunked_prefill cache handling and the other about removal of is_torch_fx_available; different breakages and fixes.", "right": "issue:44561"}], "summary": "The cluster is mostly heterogeneous: several unrelated model-loading, generation, import, and UI/reporting bugs are grouped 
together, but the soft-linked pairs do not look like true duplicates. Most similarities are only at the level of shared model families or v5 regression wording."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:42491|issue:43299", "issue:44246|issue:44496", "issue:36246|issue:44351", "issue:43531|issue:45362", "issue:44295|issue:45397", "issue:43526|issue:45003", "issue:43994|issue:45276", "issue:44336|issue:44485", "issue:43673|issue:44561"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6816, "estimated_input_tokens": 3280, "item_count": 18, "node_count": 18, "serialized_chars": 13119, "soft_pair_count": 9}, "cached_at": "2026-04-14T21:02:05Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "ef24e9141fda8460f192b9df806bce2c3b5627e3bd9378157212bdbb057601d7", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:36246", "issue:42886", "issue:43066", "issue:43441", "issue:43540", "issue:43596", "issue:44162", "issue:44242", "issue:44246", "issue:44291", "issue:44779", "issue:44857", "issue:44871", "issue:44877", "issue:45200", "issue:45229", "issue:45362", "issue:45375"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": null, "canonical_issue_reason": "No single canonical issue: the items span unrelated failures across tokenizers, config validation, multimodal models, training/runtime errors, and performance complaints.", "canonical_pr_reason": null, "confidence": 0.93, "soft_edge_verdicts": [{"accept": false, "left": "issue:44291", "reason": "Both are crashes, but they affect different code paths and models: init_empty_weights/_is_hf_initialized versus 
Qwen3.5 chat generation. Similar symptom, not the same bug.", "right": "issue:45362"}, {"accept": false, "left": "issue:44242", "reason": "Load-balancing loss omission in MoE routing and an AMP/CUDA crash in LwDetrImageLoss are unrelated defects in different subsystems.", "right": "issue:44857"}, {"accept": false, "left": "issue:43066", "reason": "Both mention tokenizers in v5, but one is about decoder type handling while the other is DeepSeek tokenization correctness. Different model families and failure modes.", "right": "issue:44779"}, {"accept": false, "left": "issue:44162", "reason": "ESM2 breakage and Gemma 4 mm_token_type_ids defaults are model-specific issues with different causes and fixes.", "right": "issue:45200"}, {"accept": false, "left": "issue:36246", "reason": "An ImportError for Qwen2_5_VLImageProcessor is unrelated to a deepspeed zero3/BertModel index error.", "right": "issue:43596"}, {"accept": false, "left": "issue:44877", "reason": "Both involve strict config handling, but they concern different models and different concrete symptoms (load failure vs. field silently dropped). Too broad to treat as the same bug.", "right": "issue:45375"}, {"accept": false, "left": "issue:42886", "reason": "Offline tokenizer cache loading and Gemma4 multi-GPU CUDA OOM are unrelated runtime problems.", "right": "issue:45229"}, {"accept": false, "left": "issue:43441", "reason": "FlashAttention failure for Ministral-3 and a Qwen3OmniMoe video-processing ValueError are different code paths and not mergeable as one fix.", "right": "issue:43540"}, {"accept": false, "left": "issue:44246", "reason": "Import-time slowness and an eos_token_id config inconsistency are unrelated issues.", "right": "issue:44871"}], "summary": "This cluster is mostly a grab-bag of unrelated model-specific regressions and infra bugs. 
None of the soft pairs are strong duplicates, so I would not merge any of them into a single canonical issue."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44291|issue:45362", "issue:44242|issue:44857", "issue:43066|issue:44779", "issue:44162|issue:45200", "issue:36246|issue:43596", "issue:44877|issue:45375", "issue:42886|issue:45229", "issue:43441|issue:43540", "issue:44246|issue:44871"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6528, "estimated_input_tokens": 3136, "item_count": 17, "node_count": 17, "serialized_chars": 12543, "soft_pair_count": 9}, "cached_at": "2026-04-14T21:02:40Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "a803da39c18a9c266373d1325325bc2b1bdd3616fd2bbc5f241c48832a705d56", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:41628", "issue:41950", "issue:43065", "issue:43232", "issue:43262", "issue:43408", "issue:43901", "issue:44246", "issue:44295", "issue:44336", "issue:44496", "issue:44749", "issue:44779", "issue:44995", "issue:45003", "issue:45200", "issue:45292"], "result": {"analyst_result": {"best_issue_reason": "No issue is a good global representative here; the titles and problem statements are too heterogeneous to safely collapse into one canonical bug report.", "best_pr_reason": null, "canonical_issue_reason": "No single canonical issue stands out because the items describe distinct bugs in different subsystems/models rather than the same underlying defect.", "canonical_pr_reason": null, "confidence": 0.94, "soft_edge_verdicts": [{"accept": false, "left": "Cannot import name 'AutoImageProcessor' from 'transformers'", "reason": 
"Related area, but different failure modes: one is an import/export problem, the other is pipeline logic selecting the wrong processor type.", "right": "video-classification pipeline looks for image processors"}, {"accept": false, "left": "[Bug] GlmMoeDsa crashes on second forward pass \u2014 stale indexer cache", "reason": "Completely different models and code paths; one is a cache/state bug, the other is a Gemma input-defaulting issue.", "right": "[Gemma 4] mm_token_type_ids required for text-only fine-tuning - should default to zeros"}, {"accept": false, "left": "Deepseek tokenizer produces incorrect results as of v5 (works in v4)", "reason": "Tokenizer correctness and embedding-resize behavior are separate bugs with no shared underlying change.", "right": "resize_token_embeddings does not effect to output_embeddings"}, {"accept": false, "left": "TextClassificationPipeline docs still mention return_all_scores, but behavior differs", "reason": "Documentation mismatch versus import-time/module-access safety are unrelated.", "right": "modeling_utils unsafely accesses sys.modules[]"}, {"accept": false, "left": "_update_model_kwargs_for_generation after sync_gpus when generation", "reason": "Different subsystems and symptoms; generation-state handling is unrelated to terminal-color output.", "right": "Some ANSI codes are generated by utils/loading_report even when not connected to terminal"}, {"accept": false, "left": "An error occurs when reading position_ids after registering it as a buffer.", "reason": "A tensor/buffer access error is not the same underlying issue as a performance regression after upgrade.", "right": "Transformer \u4ece4.57.3 \u5347\u7ea7\u52305.3.0 \u540e\u8fc7\u6ee4\u6570\u636e\u65f6\u957f\u53d8\u6162\u5341\u500d\u4ee5\u4e0a"}, {"accept": false, "left": "Dummy `nn.Conv2d` in `Sam3PixelDecoder`", "reason": "Both touch SAM3, but one is an implementation placeholder and the other is a config/model-type mismatch warning.", "right": "Warning: You are 
using a model of type sam3_video to instantiate a model of type sam3_tracker"}, {"accept": false, "left": "Audio processors: `apply_chat_template()` defaults to 16kHz sampling rate, even if the processor config sets a different value", "reason": "Audio preprocessing default-rate handling is unrelated to slow import performance.", "right": "import transformers takes long sometimes"}, {"accept": false, "left": "ValueError: Unrecognized model in allenai/Olmo-Hybrid-Instruct-SFT-7B. Should have a `model_type` key in its config.json.", "reason": "One is a model config loading error, the other is an internal module lookup safety issue; they do not indicate the same bug.", "right": "modeling_utils unsafely accesses sys.modules[]"}], "summary": "The cluster is not a duplicate set; it is a collection of unrelated bug reports across different models, pipelines, and utility code that only share broad Transformers vocabulary."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:41628|issue:41950", "issue:44995|issue:45200", "issue:44779|issue:45292", "issue:43901|issue:45003", "issue:43232|issue:44336", "issue:44295|issue:44749", "issue:43065|issue:43408", "issue:43262|issue:44246", "issue:44496|issue:45003"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6960, "estimated_input_tokens": 3352, "item_count": 18, "node_count": 18, "serialized_chars": 13408, "soft_pair_count": 9}, "cached_at": "2026-04-14T21:03:04Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "7b0b8178b7c40298509f1e3a6beff283daa2d4c4dcfb6abb3c266d58d2225c2f", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:43257", 
"issue:43295", "issue:43475", "issue:43540", "issue:43653", "issue:43673", "issue:44060", "issue:44077", "issue:44368", "issue:44466", "issue:44734", "issue:44749", "issue:44869", "issue:44977", "issue:45137", "issue:45292", "issue:45335", "issue:45356"], "result": {"analyst_result": {"best_issue_reason": "issue:44466 is the strongest issue to keep as the representative example because it is specific, reproducible, and narrowly scoped while still clearly describing the failure mode.", "best_pr_reason": null, "canonical_issue_reason": "issue:44466 is the most representative standalone bug report: it has a clear regression framing, a concrete observable failure, and enough detail to diagnose tied-weight serialization behavior across versions/devices.", "canonical_pr_reason": null, "confidence": 0.95, "soft_edge_verdicts": [{"accept": false, "left": "issue:43257", "reason": "Both mention DeepSpeed, but one is Qwen3 MOE weight conversion/loading and the other is a ZeRO3 deque IndexError; different failure paths.", "right": "issue:45137"}, {"accept": false, "left": "issue:43475", "reason": "Different models and different bugs: missing SAM3 vision encoder attribute vs Qwen3OmniMoe video input ValueError.", "right": "issue:43540"}, {"accept": false, "left": "issue:43295", "reason": "Both touch processor/tokenizer or embeddings, but one is a processor.tokenizer regression and the other is resize_token_embeddings not updating output embeddings.", "right": "issue:45292"}, {"accept": false, "left": "issue:44060", "reason": "Unrelated model families and symptoms; a tied-weights warning in Qwen3-Next is not the same as patchtsmixer post_init gating.", "right": "issue:44077"}, {"accept": false, "left": "issue:43673", "reason": "Both are cache-related, but one is GenerationMixin cache missing in chunked_prefill and the other is a serve-time KV continuation tensor indexing crash.", "right": "issue:44734"}, {"accept": false, "left": "issue:44869", "reason": "Both involve 
decoding/tokenization regressions, but they affect different models and different failure mechanisms.", "right": "issue:45356"}, {"accept": false, "left": "issue:44368", "reason": "One is a tie_word_embeddings warning during Qwen3.5 LoRA fine-tuning; the other is a flash-attention generation failure. Not the same bug.", "right": "issue:44977"}, {"accept": false, "left": "issue:44466", "reason": "Both concern embedding weights, but one is inconsistent lm_head serialization by device and the other is resize_token_embeddings not propagating to decoder.embed_tokens.", "right": "issue:45335"}, {"accept": false, "left": "issue:43653", "reason": "A BigBirdTokenizer special-token bug and a performance regression in filtering after upgrade are unrelated.", "right": "issue:44749"}], "summary": "These are mostly unrelated bug reports that only share broad subsystem keywords (tokenizers, embeddings, cache, DeepSpeed, Qwen). None of the soft pairs looks like the same underlying issue, so no duplicate merges are justified."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43257|issue:45137", "issue:43475|issue:43540", "issue:43295|issue:45292", "issue:44060|issue:44077", "issue:43673|issue:44734", "issue:44869|issue:45356", "issue:44368|issue:44977", "issue:44466|issue:45335", "issue:43653|issue:44749"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6510, "estimated_input_tokens": 3127, "item_count": 17, "node_count": 17, "serialized_chars": 12506, "soft_pair_count": 9}, "cached_at": "2026-04-14T21:03:48Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": 
"1b1c16262ffe0636a47cf313090534a94cce0e4fbad008bf8148dcf511604eeb", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:42673", "issue:43232", "issue:43381", "issue:43441", "issue:43606", "issue:43646", "issue:43673", "issue:43704", "issue:43931", "issue:44162", "issue:44297", "issue:44368", "issue:44442", "issue:44623", "issue:44749", "issue:45278", "issue:45292"], "result": {"analyst_result": {"best_issue_reason": "issue:43704 is the strongest representative of the only true duplicate pair in the set; the rest of the items are unrelated or only loosely similar.", "best_pr_reason": null, "canonical_issue_reason": "issue:43704 is the best canonical issue: it is the newer/open instance of the exact same Qwen3ForCausalLM VRAM leak reported in 42673, with identical title and symptom.", "canonical_pr_reason": null, "confidence": 0.92, "soft_edge_verdicts": [{"accept": false, "left": "issue:44442", "reason": "Both mention tokenizer/model behavior, but one is a FastSpeech2ConformerTokenizer loading failure and the other is a slowdown after upgrading Transformers; different bugs and symptoms.", "right": "issue:44749"}, {"accept": false, "left": "issue:43232", "reason": "Both involve generation internals, but one is about _update_model_kwargs_for_generation after sync_gpus and the other is a missing GenerationMixin cache in chunked_prefill; not the same code-path problem.", "right": "issue:43673"}, {"accept": false, "left": "issue:43931", "reason": "Qwen3-VL weight-shape mismatch and a tie_word_embeddings warning during LoRA fine-tuning are distinct model/config issues, not duplicates.", "right": "issue:44368"}, {"accept": false, "left": "issue:43441", "reason": "FlashAttention failure for Ministral-3 and CPU offload device-mismatch for bark-small are different model/backend failures.", "right": "issue:43606"}, {"accept": false, "left": "issue:43673", "reason": "A missing cache during chunked_prefill is a specific generation bug; the other report is broad 
import errors after upgrading versions.", "right": "issue:45278"}, {"accept": false, "left": "issue:43646", "reason": "Custom model initialization breakage in Transformers 5.0.0 is not the same as ESM2 being broken; different affected paths and symptoms.", "right": "issue:44162"}, {"accept": true, "left": "issue:42673", "reason": "Same exact title and same VRAM leak in Qwen3ForCausalLM when used across multiple dataloader threads; clearly the same underlying bug.", "right": "issue:43704"}, {"accept": false, "left": "issue:44297", "reason": "Tokenizer.save_pretrained writing the wrong tokenizer_class and processor.save_pretrained missing files are both serialization bugs but affect different objects and failure modes.", "right": "issue:44623"}, {"accept": false, "left": "issue:43381", "reason": "Gradient checkpointing in eval mode and resize_token_embeddings not updating output embeddings are unrelated model API bugs.", "right": "issue:45292"}], "summary": "This set is mostly heterogeneous. 
The only clear duplicate pair is the Qwen3ForCausalLM VRAM leak report (42673 \u2194 43704); the other soft pairs share broad Transformers areas but describe different bugs and should stay separate."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44442|issue:44749", "issue:43232|issue:43673", "issue:43931|issue:44368", "issue:43441|issue:43606", "issue:43673|issue:45278", "issue:43646|issue:44162", "issue:42673|issue:43704", "issue:44297|issue:44623", "issue:43381|issue:45292"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6884, "estimated_input_tokens": 3314, "item_count": 18, "node_count": 18, "serialized_chars": 13255, "soft_pair_count": 9}, "cached_at": "2026-04-14T21:04:25Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "f531cbfe87e1c77b0dd7c8fcbf49262a30095d4cbd2baef980de1374b4a4ebeb", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:28282", "issue:30990", "issue:36246", "issue:41628", "issue:42371", "issue:42898", "issue:43381", "issue:43452", "issue:43475", "issue:43824", "issue:44162", "issue:44933", "issue:45003", "issue:45081", "issue:45092", "issue:45103", "issue:45335", "issue:45381"], "result": {"analyst_result": {"best_issue_reason": "43824 is the best issue to anchor the cluster because it is the clearest, most central instance of the Qwen2.5-VL import regression and has the most downstream attention.", "best_pr_reason": null, "canonical_issue_reason": "Issue 43824 is the strongest representative of the one plausible duplicate theme: Qwen2.5-VL import/export breakage. 
It is also broader than the submodule-specific image-processor report.", "canonical_pr_reason": null, "confidence": 0.74, "soft_edge_verdicts": [{"accept": false, "left": "issue:28282", "reason": "Different failures: missing PyTorch dependency vs. hanging Sentence Transformers load. Not the same underlying bug.", "right": "issue:30990"}, {"accept": false, "left": "issue:42898", "reason": "A tokenizer cleanup deprecation/change issue is unrelated to an ESM2 model breakage.", "right": "issue:44162"}, {"accept": false, "left": "issue:43381", "reason": "Gradient checkpointing in eval mode and t5gemma embedding resize are separate behaviors in different code paths.", "right": "issue:45335"}, {"accept": true, "left": "issue:36246", "reason": "Both report Qwen2.5-VL import/export failures for missing symbols from transformers; this looks like the same broken export surface for the same model family.", "right": "issue:43824"}, {"accept": false, "left": "issue:43452", "reason": "gguf_file/from_pretrained loading problems and unsafe sys.modules access are not the same concrete bug.", "right": "issue:45003"}, {"accept": false, "left": "issue:41628", "reason": "Both are import-related, but they cite different missing symbols and different modules; too broad to be the same bug.", "right": "issue:44933"}, {"accept": false, "left": "issue:42371", "reason": "TF32 API guidance and InternVL2 meta-init incompatibility are unrelated issues.", "right": "issue:45092"}, {"accept": false, "left": "issue:43475", "reason": "SAM3 missing attribute and Qwen2.5-VL video position-id mismatch are different model-specific bugs.", "right": "issue:45381"}, {"accept": false, "left": "issue:45081", "reason": "Mistral regex patch crash and auto_docstring annotation crash affect different subsystems and failure modes.", "right": "issue:45103"}], "summary": "These items are mostly unrelated bugs; the only plausible duplicate pair is the two Qwen2.5-VL import/export failures, which likely share the same 
missing symbol export issue. The rest are different code paths or different features entirely."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:28282|issue:30990", "issue:42898|issue:44162", "issue:43381|issue:45335", "issue:36246|issue:43824", "issue:43452|issue:45003", "issue:41628|issue:44933", "issue:42371|issue:45092", "issue:43475|issue:45381", "issue:45081|issue:45103"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6822, "estimated_input_tokens": 3283, "item_count": 18, "node_count": 18, "serialized_chars": 13130, "soft_pair_count": 10}, "cached_at": "2026-04-14T21:04:59Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "554f57419acd39c8de2822115424480ac411ecda4c4b552f90496bd1951d7858", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:42222", "issue:42994", "issue:43232", "issue:43475", "issue:43525", "issue:43643", "issue:43873", "issue:44038", "issue:44188", "issue:44623", "issue:44661", "issue:44945", "issue:45200", "issue:45230", "issue:45310", "issue:45362", "issue:45375", "issue:45397"], "result": {"analyst_result": {"best_issue_reason": "issue:45310 is the clearest user-facing symptom report for the Qwen3.5 MoE loading failure and includes the version context (`transformers>=5.4.0`), making it the best top-level representative issue.", "best_pr_reason": null, "canonical_issue_reason": "issue:45375 is the most root-cause-specific report in the only likely duplicate pair: it names the missing `deepstack_visual_indexes` field and the `@strict` drop behavior, which likely explains the broader from_pretrained failure.", "canonical_pr_reason": null, "confidence": 
0.78, "soft_edge_verdicts": [{"accept": false, "left": "issue:42222", "reason": "Different models and different failure modes; one is a VitPose loading breakage, the other is Gemma 4 text-only finetuning needing default token type ids.", "right": "issue:45200"}, {"accept": false, "left": "issue:43475", "reason": "Both involve missing attributes/fields, but the affected objects and code paths differ: SAM 3 video output vs AutoConfig remote-code loading.", "right": "issue:43643"}, {"accept": false, "left": "issue:42994", "reason": "Quantized-model save failure is unrelated to a Llama4 config missing `pad_token_id`; these are not the same bug.", "right": "issue:43525"}, {"accept": true, "left": "issue:45310", "reason": "Both point to the same Qwen3.5 MoE config-loading/from_pretrained path; the missing `deepstack_visual_indexes` field is a plausible root cause of the broader from_pretrained error.", "right": "issue:45375"}, {"accept": false, "left": "issue:44038", "reason": "Same broad area of model/processor handling, but the concrete problems differ: Qwen3-VL-Moe bug vs `processor.save_pretrained(...)` missing files.", "right": "issue:44623"}, {"accept": false, "left": "issue:43873", "reason": "Quantization/offloading behavior and pipeline-parallel output correctness are different runtime problems and not the same underlying change or fix.", "right": "issue:44945"}, {"accept": false, "left": "issue:45230", "reason": "`Bug report` is too generic to match the specific Gemma-4 ZeRO3 from_pretrained failure; no clear same-bug evidence.", "right": "issue:45397"}, {"accept": false, "left": "issue:43475", "reason": "Both touch model/config plumbing, but one is a missing vision-model attribute and the other is a registry/mapping failure for add-new-model-like.", "right": "issue:44661"}, {"accept": false, "left": "issue:45362", "reason": "Qwen3.5 chat crashes and a missing vision-config field are related only by model family, not by the same concrete bug.", "right": 
"issue:45375"}, {"accept": false, "left": "issue:43232", "reason": "Generation kwarg updates after sync_gpus and attention-kernel divergence under torch.compile are unrelated code paths.", "right": "issue:44188"}], "summary": "The cluster is mostly heterogeneous; only one pair appears to be the same underlying Qwen3.5 MoE config-loading bug. The rest are similar at a high level (missing fields, saving, quantization, generation) but not the same concrete issue."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:42222|issue:45200", "issue:43475|issue:43643", "issue:42994|issue:43525", "issue:45310|issue:45375", "issue:44038|issue:44623", "issue:43873|issue:44945", "issue:45230|issue:45397", "issue:43475|issue:44661", "issue:45362|issue:45375", "issue:43232|issue:44188"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6548, "estimated_input_tokens": 3146, "item_count": 17, "node_count": 17, "serialized_chars": 12581, "soft_pair_count": 10}, "cached_at": "2026-04-14T21:05:54Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "efe7471c6ccda5481ec6857e889a29de41c0bbfc1f0610ba7f4d698046fc3c15", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:30990", "issue:35141", "issue:36010", "issue:41628", "issue:42831", "issue:42994", "issue:43232", "issue:43525", "issue:43673", "issue:44164", "issue:44188", "issue:44222", "issue:44351", "issue:44792", "issue:45310", "issue:45375", "issue:45397"], "result": {"analyst_result": {"best_issue_reason": "Issue 44164 is the strongest standalone anchor by specificity and diagnostic detail, but it should not be treated as canonical for the whole cluster 
since the rest are unrelated.", "best_pr_reason": null, "canonical_issue_reason": "No single issue cleanly represents the cluster because the titles point to separate bugs; if forced, issue 44164 is the broadest and most detailed serialization-related report, but it is not a true duplicate hub.", "canonical_pr_reason": null, "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "issue:41628", "reason": "Both are import errors, but they involve different symbols (`AutoImageProcessor` vs `HybridCache`) and likely different breakages.", "right": "issue:44351"}, {"accept": false, "left": "issue:44222", "reason": "Both mention MoE/from_pretrained or saving/loading, but one is an FP8 save_pretrained bug and the other is a Qwen3.5 from_pretrained error; different code paths.", "right": "issue:45310"}, {"accept": false, "left": "issue:42831", "reason": "One is an FP8 accuracy regression, the other is an FP8 save_pretrained failure; same broad area, not the same bug.", "right": "issue:44222"}, {"accept": false, "left": "issue:42994", "reason": "Both concern model saving/loading, but `quantized model saving failed` and `extra_state` handling are distinct serialization failures.", "right": "issue:44164"}, {"accept": false, "left": "issue:36010", "reason": "Different import targets (`GenerationMixin` vs `AutoImageProcessor`) and different failure points.", "right": "issue:41628"}, {"accept": false, "left": "issue:43525", "reason": "Both are config/schema issues, but they involve different models and different missing fields; not the same underlying defect.", "right": "issue:45375"}, {"accept": false, "left": "issue:45310", "reason": "Both are `from_pretrained` errors, but they affect different model families and likely different loaders/failure causes.", "right": "issue:45397"}, {"accept": false, "left": "issue:43673", "reason": "One is a missing GenerationMixin cache in chunked prefill, the other is attention-kernel divergence under `torch.compile`; unrelated 
code paths.", "right": "issue:44188"}, {"accept": false, "left": "issue:30990", "reason": "A loading hang for Sentence Transformers is unrelated to embedding reinitialization after resizing token embeddings.", "right": "issue:35141"}, {"accept": false, "left": "issue:43232", "reason": "Generation kwargs handling after `sync_gpus` is unrelated to a Janus image-generation test failure.", "right": "issue:44792"}], "summary": "This cluster is not a duplicate set: the items span unrelated import errors, generation/cache bugs, config-field regressions, and serialization/saving issues. All soft edges should be rejected."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:41628|issue:44351", "issue:44222|issue:45310", "issue:42831|issue:44222", "issue:42994|issue:44164", "issue:36010|issue:41628", "issue:43525|issue:45375", "issue:45310|issue:45397", "issue:43673|issue:44188", "issue:30990|issue:35141", "issue:43232|issue:44792"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6964, "estimated_input_tokens": 3354, "item_count": 18, "node_count": 18, "serialized_chars": 13415, "soft_pair_count": 10}, "cached_at": "2026-04-14T21:06:20Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "3d2d262e8e1d3caf6c2045c51467bf557b6ea294c57bc0acd5d15833b0dce755", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:42175", "issue:42898", "issue:43377", "issue:43479", "issue:43784", "issue:43824", "issue:43976", "issue:44038", "issue:44060", "issue:44079", "issue:44188", "issue:44242", "issue:44373", "issue:44568", "issue:44869", "issue:44933", "issue:45125", "issue:45310"], "result": {"analyst_result": 
{"best_issue_reason": null, "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.93, "soft_edge_verdicts": [{"accept": false, "left": "issue:43377", "reason": "Different failure modes in different model paths: MIMI batching/padding-mask output divergence vs MoE router load-balancing loss gating.", "right": "issue:44242"}, {"accept": false, "left": "issue:43479", "reason": "Both mention None-handling, but one is multimodal config default initialization and the other is ModelOutput key assignment logic; not the same code path.", "right": "issue:44079"}, {"accept": false, "left": "issue:44038", "reason": "Both involve Qwen VL/MoE models, but the reported breakages are on different model variants and different version ranges, with no clear shared root cause.", "right": "issue:45310"}, {"accept": false, "left": "issue:42175", "reason": "Unrelated topics: pip extras/backend dependency installation vs a missing import from image_utils.", "right": "issue:44933"}, {"accept": false, "left": "issue:43479", "reason": "Config defaulting bug in Phi4MultimodalConfig vs Whisper word-timestamp decode crash; unrelated subsystems.", "right": "issue:44869"}, {"accept": false, "left": "issue:42898", "reason": "Both are tokenizer/behavior regressions, but one is about clean_up_tokenization_spaces and the other about add_special_tokens/BOS-EOS insertion in a specific tokenizer.", "right": "issue:44568"}, {"accept": false, "left": "issue:43976", "reason": "Python version compatibility issue vs missing image_utils import; no shared bug.", "right": "issue:44933"}, {"accept": false, "left": "issue:43784", "reason": "Import-time failures with different causes: missing nn in sentence-transformers integration vs missing Qwen2.5 VL export.", "right": "issue:43824"}, {"accept": false, "left": "issue:44188", "reason": "torch.compile attention-kernel divergence is unrelated to a wrong docstring for position_ids.", "right": "issue:44373"}, {"accept": 
false, "left": "issue:44060", "reason": "Both touch Qwen3-family models, but one is a tied-weights warning and the other is missing tensor-parallel plan metadata; different defects.", "right": "issue:45125"}], "summary": "The cluster is heterogeneous: none of the soft pairs appear to be the same underlying bug or change, so there is no clear duplicate group or canonical artifact here."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43377|issue:44242", "issue:43479|issue:44079", "issue:44038|issue:45310", "issue:42175|issue:44933", "issue:43479|issue:44869", "issue:42898|issue:44568", "issue:43976|issue:44933", "issue:43784|issue:43824", "issue:44188|issue:44373", "issue:44060|issue:45125"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7184, "estimated_input_tokens": 3464, "item_count": 18, "node_count": 18, "serialized_chars": 13855, "soft_pair_count": 11}, "cached_at": "2026-04-14T21:06:36Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "2dfb78da1cd958b8bd019f072869ce95fce0f410a177e9e289db1409ee60b2c9", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:42898", "issue:43278", "issue:43723", "issue:43874", "issue:44077", "issue:44242", "issue:44448", "issue:44568", "issue:44623", "issue:44734", "issue:44779", "issue:44964", "issue:45092", "issue:45103", "issue:45137", "issue:45276", "issue:45310", "issue:45356"], "result": {"analyst_result": {"best_issue_reason": "issue:42898 is the best representative only in the sense that it is the most general v5 regression umbrella; it is not a precise duplicate target.", "best_pr_reason": null, "canonical_issue_reason": "issue:42898 
is the broadest umbrella for v5 behavior changes, but it does not match the specific bugs in the other issues.", "canonical_pr_reason": null, "confidence": 0.31, "soft_edge_verdicts": [{"accept": false, "left": "issue:44964", "reason": "Different failures: model loading for Phi-4 multimodal vs DeepSpeed ZeRO3 deque IndexError.", "right": "issue:45137"}, {"accept": false, "left": "issue:44734", "reason": "Serve/KV-cache tensor indexing bug and auto-docstring annotations crash are unrelated code paths.", "right": "issue:45103"}, {"accept": false, "left": "issue:43723", "reason": "Both are tokenizer regressions, but for different models and different symptoms; not the same underlying bug.", "right": "issue:44568"}, {"accept": false, "left": "issue:42898", "reason": "Generic v5 tokenizer behavior change vs Kimi-K2.5 codec/regex regression; too different to merge.", "right": "issue:45356"}, {"accept": false, "left": "issue:43278", "reason": "Embedding dtype mismatch in eval is unrelated to ZeRO3 deque underflow.", "right": "issue:45137"}, {"accept": false, "left": "issue:44623", "reason": "Processor save-pretrained file omissions and Qwen3.5 from_pretrained failure are different load/save problems.", "right": "issue:45310"}, {"accept": false, "left": "issue:43874", "reason": "Missing image-patch method and remote-code/meta-init incompatibility are distinct multimodal issues.", "right": "issue:45092"}, {"accept": false, "left": "issue:42898", "reason": "Both mention tokenizer behavior in v5, but they concern different tokenizers and different regressions.", "right": "issue:44779"}, {"accept": false, "left": "issue:44077", "reason": "PatchTSMixer post_init API restriction and MoE router-loss omission are unrelated model behavior changes.", "right": "issue:44242"}, {"accept": false, "left": "issue:42898", "reason": "A broad v5 output-change report is not the same bug as Pegasus-specific output divergence.", "right": "issue:44448"}, {"accept": false, "left": "issue:45137", 
"reason": "ZeRO3 deque error and Gemma4 resize_token_embeddings propagation bug do not share a concrete code path.", "right": "issue:45276"}], "summary": "These are mostly unrelated regression reports grouped only by broad Transformers v5 context; I don\u2019t see any true duplicate pairs among the soft candidates. The closest thing to a cluster-wide umbrella is the general v5 behavior-change issue, but it is not a duplicate of the others."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44964|issue:45137", "issue:44734|issue:45103", "issue:43723|issue:44568", "issue:42898|issue:45356", "issue:43278|issue:45137", "issue:44623|issue:45310", "issue:43874|issue:45092", "issue:42898|issue:44779", "issue:44077|issue:44242", "issue:42898|issue:44448", "issue:45137|issue:45276"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6634, "estimated_input_tokens": 3189, "item_count": 17, "node_count": 17, "serialized_chars": 12755, "soft_pair_count": 10}, "cached_at": "2026-04-14T21:07:09Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "9ec99ff313efef4311d5d6491977aeb5786401337ff0c8289994bfa5e551423a", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:42831", "issue:42898", "issue:43295", "issue:43479", "issue:43644", "issue:43723", "issue:44206", "issue:44222", "issue:44263", "issue:44297", "issue:44360", "issue:44483", "issue:44484", "issue:44623", "issue:44977", "issue:45103", "issue:45406"], "result": {"analyst_result": {"best_issue_reason": "If forced to pick the closest hub, issue:44623 is the most general save_pretrained serialization report, but it still does not subsume the other 
bugs.", "best_pr_reason": null, "canonical_issue_reason": "No clear canonical issue: the cluster is heterogeneous across tokenizer behavior, multimodal config, serve/runtime crashes, FP8 serialization, and DSA indexer bugs.", "canonical_pr_reason": null, "confidence": 0.93, "soft_edge_verdicts": [{"accept": false, "left": "issue:42898", "reason": "Both are v5/tokenizer regressions, but one is clean_up_tokenization_spaces behavior and the other is processor.tokenizer/image handling; different bugs.", "right": "issue:43295"}, {"accept": false, "left": "issue:43479", "reason": "Phi4MultimodalConfig None-handling is unrelated to non-persistent buffer junk during v5 model loading.", "right": "issue:43644"}, {"accept": false, "left": "issue:44206", "reason": "LasrFeatureExtractor center-arg crash and _process_kwargs_parameters AttributeError are separate code paths with different failure modes.", "right": "issue:45103"}, {"accept": false, "left": "issue:44222", "reason": "FP8 MoE save_pretrained bug and tokenizer_class mismatch in tokenizer_config.json are both save_pretrained-related but not the same underlying defect.", "right": "issue:44297"}, {"accept": false, "left": "issue:44222", "reason": "One concerns FP8 MoE model saving; the other is a max_shard_size default question. 
Same API, different issue.", "right": "issue:44484"}, {"accept": false, "left": "issue:44483", "reason": "OpenAI-style /v1/chat/completions request rejection is unrelated to Gemma4Processor missing _tokenizer in serve.", "right": "issue:45406"}, {"accept": false, "left": "issue:42831", "reason": "FineGrainedFP8 accuracy loss is a model-quality bug, while processor.save_pretrained missing files is a serialization output bug.", "right": "issue:44623"}, {"accept": false, "left": "issue:44222", "reason": "Both mention save_pretrained, but FP8 MoE saving and processor file omission are different concrete failures.", "right": "issue:44623"}, {"accept": false, "left": "issue:44263", "reason": "Both involve the DSA indexer, but torch.split return values and missing ReLU describe distinct fixes.", "right": "issue:44360"}, {"accept": false, "left": "issue:43723", "reason": "Tokenizer loading in v5 and Qwen3.5 flash-attention generation failure are different regressions despite both affecting model use.", "right": "issue:44977"}], "summary": "All soft links look like false positives: the issues share broad terms like v5, tokenizer, or save_pretrained, but they describe different concrete regressions and code paths. 
No single duplicate canonical emerges from this set."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:42898|issue:43295", "issue:43479|issue:43644", "issue:44206|issue:45103", "issue:44222|issue:44297", "issue:44222|issue:44484", "issue:44483|issue:45406", "issue:42831|issue:44623", "issue:44222|issue:44623", "issue:44263|issue:44360", "issue:43723|issue:44977"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6970, "estimated_input_tokens": 3357, "item_count": 18, "node_count": 18, "serialized_chars": 13425, "soft_pair_count": 10}, "cached_at": "2026-04-14T21:08:18Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "3b660638696dac22ad3db95f2a95cca35035851f6819f44c2f3c7d3975404b29", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:22355", "issue:28282", "issue:43479", "issue:43606", "issue:43723", "issue:44261", "issue:44263", "issue:44297", "issue:44373", "issue:44485", "issue:44514", "issue:44623", "issue:44871", "issue:44913", "issue:44991", "issue:45092", "issue:45103", "issue:45310"], "result": {"analyst_result": {"best_issue_reason": "issue:43723 is the best single issue to anchor the set because it is a clear, general regression with a core API surface and likely relates to multiple downstream tokenizer-loading complaints.", "best_pr_reason": null, "canonical_issue_reason": "issue:43723 is the broadest and most central regression report in the set: it describes a v5 tokenizer loading failure at the core AutoTokenizer.from_pretrained path, which is more representative than the model-specific or config-specific reports.", "canonical_pr_reason": null, "confidence": 0.86, 
"soft_edge_verdicts": [{"accept": false, "left": "issue:22355", "reason": "Both are import errors, but they fail for different missing components: transformers.onnx vs PyTorch for AutoModel. Not the same bug or code path.", "right": "issue:28282"}, {"accept": false, "left": "issue:44263", "reason": "Different model families and different failure modes: torch.split return handling in GLM MoE indexing vs meta-init incompatibility in InternVL2.", "right": "issue:45092"}, {"accept": false, "left": "issue:44514", "reason": "One is a processor batching crash in Qwen2.5-VL; the other is an auto-docstring AttributeError triggered by future annotations. Unrelated code paths.", "right": "issue:45103"}, {"accept": false, "left": "issue:44297", "reason": "Both mention tokenizer/model loading, but one is save_pretrained metadata mismatch and the other is a Qwen3.5 MoE from_pretrained regression. Too different to treat as the same fix.", "right": "issue:45310"}, {"accept": false, "left": "issue:43606", "reason": "CPU offload device mismatch for bark-small is unrelated to processor.save_pretrained missing files.", "right": "issue:44623"}, {"accept": false, "left": "issue:43479", "reason": "Both concern config round-tripping/defaults, but they involve different model configs and different symptoms; not the same underlying bug.", "right": "issue:44913"}, {"accept": false, "left": "issue:43723", "reason": "Tokenizer loading regression vs eos_token_id config mismatch; related to model loading broadly, but not the same concrete issue.", "right": "issue:44871"}, {"accept": false, "left": "issue:44261", "reason": "Different models and failures: missing rms_norm_eps precision issue vs old InternVL2 remote-code/meta-init incompatibility.", "right": "issue:45092"}, {"accept": false, "left": "issue:43723", "reason": "Both are tokenizer-loading complaints, but one is a general v5 AutoTokenizer regression and the other is a specific EMBEDDIA/est-roberta failure. 
Not enough evidence they are the same bug.", "right": "issue:44991"}, {"accept": false, "left": "issue:44373", "reason": "Docstring text error vs RoPE implementation discussion. They are not the same bug or change.", "right": "issue:44485"}], "summary": "The items are mostly unrelated issues with only superficial topical overlap. All soft-edge pairs should be rejected; none appear to describe the same underlying bug or fixable code path. The best representative issue is the broad tokenizer-loading regression in Transformers v5."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:22355|issue:28282", "issue:44263|issue:45092", "issue:44514|issue:45103", "issue:44297|issue:45310", "issue:43606|issue:44623", "issue:43479|issue:44913", "issue:43723|issue:44871", "issue:44261|issue:45092", "issue:43723|issue:44991", "issue:44373|issue:44485"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6702, "estimated_input_tokens": 3223, "item_count": 18, "node_count": 18, "serialized_chars": 12891, "soft_pair_count": 9}, "cached_at": "2026-04-14T21:08:57Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "5db36c7429d1d011386745237a70d72ba31eb62d46c9d3675039734a5927bc8a", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:30990", "issue:38175", "issue:41628", "issue:42831", "issue:42886", "issue:42994", "issue:43576", "issue:43723", "issue:43784", "issue:43824", "issue:44263", "issue:44295", "issue:44373", "issue:44442", "issue:44623", "issue:45092", "issue:45310", "issue:45357"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": null, "canonical_issue_reason": null, 
"canonical_pr_reason": null, "confidence": 0.82, "soft_edge_verdicts": [{"accept": false, "left": "issue:45092", "reason": "Both involve Qwen/vision checkpoint handling, but one is meta-init incompatibility for old InternVL2 remote-code checkpoints and the other is a save_pretrained regression with wrong visual encoder keys; different failure modes.", "right": "issue:45357"}, {"accept": false, "left": "issue:42886", "reason": "Offline tokenizer cache loading and quantized model saving are unrelated code paths and symptoms.", "right": "issue:42994"}, {"accept": false, "left": "issue:43723", "reason": "Both are tokenizer-loading problems, but they affect different tokenizer classes and likely distinct registration/load issues.", "right": "issue:44442"}, {"accept": false, "left": "issue:30990", "reason": "Sentence-Transformers loading hang and zero probabilities on SigLIP2 are unrelated model/runtime bugs.", "right": "issue:38175"}, {"accept": false, "left": "issue:41628", "reason": "Both are import errors, but for different missing symbols and different model families; not the same underlying bug.", "right": "issue:43824"}, {"accept": false, "left": "issue:44295", "reason": "Position_ids buffer access and processor.save_pretrained missing files are separate issues in different parts of the stack.", "right": "issue:44623"}, {"accept": false, "left": "issue:42831", "reason": "FineGrainedFP8 accuracy regression and Qwen3.5 from_pretrained failure are not the same problem.", "right": "issue:45310"}, {"accept": false, "left": "issue:43576", "reason": "Broken env command in v5 and sentence-transformers import NameError are different regressions, even if both mention v5.", "right": "issue:43784"}, {"accept": false, "left": "issue:44263", "reason": "A torch.split shape/return-value issue in GlmMoeDsaIndexer is unrelated to a docstring typo about position_ids.", "right": "issue:44373"}], "summary": "These are superficially similar Transformers-related reports, but the soft 
pairs point to different bugs/code paths rather than duplicates. I would keep them separate."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:45092|issue:45357", "issue:42886|issue:42994", "issue:43723|issue:44442", "issue:30990|issue:38175", "issue:41628|issue:43824", "issue:44295|issue:44623", "issue:42831|issue:45310", "issue:43576|issue:43784", "issue:44263|issue:44373"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6834, "estimated_input_tokens": 3289, "item_count": 18, "node_count": 18, "serialized_chars": 13155, "soft_pair_count": 10}, "cached_at": "2026-04-14T21:09:29Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "9f2b188a344c7bf4cff9328dda1eac8545ab96edec30b56d1e8bd5c6241afe54", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:30990", "issue:33357", "issue:42371", "issue:42831", "issue:42886", "issue:42907", "issue:43066", "issue:43122", "issue:43232", "issue:44038", "issue:44117", "issue:44263", "issue:44514", "issue:44521", "issue:44704", "issue:44945", "issue:45245", "issue:45290"], "result": {"analyst_result": {"best_issue_reason": "44521 is the best standalone issue to keep visible: it is open, specific, and likely to attract/collect follow-up reports around the same chat-template path.", "best_pr_reason": null, "canonical_issue_reason": "If one issue must anchor the set, 44521 is the strongest representative because it is active, has the most inbound references, and describes a concrete `apply_chat_template` multimodal failure with clear reproduction context.", "canonical_pr_reason": null, "confidence": 0.84, "soft_edge_verdicts": [{"accept": false, 
"left": "issue:43122", "reason": "Both involve tokenization/chat-template behavior, but one is a version-to-version tokenization change and the other is zeroed assistant masks for multimodal inputs; different bugs and code paths.", "right": "issue:44521"}, {"accept": false, "left": "issue:42886", "reason": "Both are tokenizer-related, but one is offline cache loading failure and the other is wrong tokenizer decoder type in v5; not the same underlying defect.", "right": "issue:43066"}, {"accept": false, "left": "issue:42831", "reason": "These concern different model/precision issues: FineGrainedFP8 accuracy vs a Qwen3-VL-Moe bug in Transformers 5.0.", "right": "issue:44038"}, {"accept": false, "left": "issue:44117", "reason": "One is a tokenizer mapping assumption in `from_pretrained`; the other is `AutoProcessor.from_pretrained` dropping kwargs to `cached_file`. Similar area, different failure mode.", "right": "issue:44704"}, {"accept": false, "left": "issue:30990", "reason": "Completely different user-facing failures: Sentence Transformers loading hang vs MacOS bus error with a CLIP model.", "right": "issue:33357"}, {"accept": false, "left": "issue:43122", "reason": "Version-dependent tokenization mismatch and a batched `apply_chat_template` crash with `padding=False` are distinct bugs.", "right": "issue:44514"}, {"accept": false, "left": "issue:42371", "reason": "TF32 configuration guidance and a `torch.split()` return-value issue in a model-specific indexer are unrelated.", "right": "issue:44263"}, {"accept": false, "left": "issue:42907", "reason": "Saving dequantized models and incorrect pipeline-parallel output are different subsystems and failure modes.", "right": "issue:44945"}, {"accept": false, "left": "issue:44704", "reason": "Both touch processor/chat-template flows, but one is missing kwargs in `cached_file` while the other is a crash on assistant messages with tool calls and no content.", "right": "issue:45290"}, {"accept": false, "left": 
"issue:43232", "reason": "Generation sync/update ordering and a categorical cardinality limit error are unrelated.", "right": "issue:45245"}], "summary": "These issues are mostly unrelated one-off bugs across tokenization, chat templating, generation, processor loading, and model-specific regressions. There isn\u2019t a strong duplicate cluster here, so no soft pair should be merged."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43122|issue:44521", "issue:42886|issue:43066", "issue:42831|issue:44038", "issue:44117|issue:44704", "issue:30990|issue:33357", "issue:43122|issue:44514", "issue:42371|issue:44263", "issue:42907|issue:44945", "issue:44704|issue:45290", "issue:43232|issue:45245"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6772, "estimated_input_tokens": 3258, "item_count": 18, "node_count": 18, "serialized_chars": 13031, "soft_pair_count": 9}, "cached_at": "2026-04-14T21:09:52Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "8769954b511fd9595d08fadf5e939a7d8f86ec7c89120dd3f4e88296acc5cde8", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:39401", "issue:41628", "issue:42907", "issue:43116", "issue:43122", "issue:43232", "issue:43408", "issue:43475", "issue:43937", "issue:43976", "issue:44488", "issue:44625", "issue:44743", "issue:44779", "issue:44829", "issue:44945", "issue:45200", "issue:45278"], "result": {"analyst_result": {"best_issue_reason": "issue:45278 is the most general and highest-level report here; the others are narrower model- or feature-specific bugs that do not unify the cluster.", "best_pr_reason": null, "canonical_issue_reason": "issue:45278 
is the broadest, most general user-facing regression report in the set (multiple import failures after upgrading), so it is the best representative issue if a single anchor is needed.", "canonical_pr_reason": null, "confidence": 0.92, "soft_edge_verdicts": [{"accept": false, "left": "issue:43976", "reason": "Both are runtime regressions, but one is a Python-version incompatibility/import issue and the other is an incorrect-generation-output issue; different code paths and symptoms.", "right": "issue:44945"}, {"accept": false, "left": "issue:43232", "reason": "Both involve generation state, but one is about sync_gpus updating kwargs and the other is about recurrent states being reset in modular Qwen3.5; not the same bug.", "right": "issue:44743"}, {"accept": false, "left": "issue:39401", "reason": "Both mention tokenizer-related problems, but one is wrong offset mappings for Qwen3 and the other is failing to load a specific model; too different in concrete failure mode.", "right": "issue:44488"}, {"accept": false, "left": "issue:41628", "reason": "45278 is a broad upgrade/import regression, but 41628 is a specific missing AutoImageProcessor import; similar theme, not the same underlying bug.", "right": "issue:45278"}, {"accept": false, "left": "issue:42907", "reason": "Dequantized model save failure and invalid GenerationConfig are unrelated subsystems with different failure signatures.", "right": "issue:43937"}, {"accept": false, "left": "issue:44625", "reason": "One is a config propagation bug in Qwen3.5 classification setup; the other is a training degeneration with flash_attention_3. 
Different bugs and likely different fixes.", "right": "issue:44829"}, {"accept": false, "left": "issue:43116", "reason": "Example-script multi-label output handling and Gemma 4 multimodal token-type defaults are unrelated training/setup issues.", "right": "issue:45200"}, {"accept": false, "left": "issue:43408", "reason": "Both are SAM 3 video related, but one is a model-type mismatch warning and the other is a missing attribute on the vision encoder output; not the same code-path problem.", "right": "issue:43475"}, {"accept": false, "left": "issue:43122", "reason": "Both are tokenizer regressions in v5, but they affect different model families and different tokenization behaviors; not mergeable as one duplicate.", "right": "issue:44779"}], "summary": "No soft pair looks like the same underlying bug/change; the cluster is mostly a set of unrelated model-, tokenizer-, import-, and generation-regression reports. I\u2019d use the broad upgrade/import regression as the most representative issue, but only as a loose anchor, not a true duplicate target."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43976|issue:44945", "issue:43232|issue:44743", "issue:39401|issue:44488", "issue:41628|issue:45278", "issue:42907|issue:43937", "issue:44625|issue:44829", "issue:43116|issue:45200", "issue:43408|issue:43475", "issue:43122|issue:44779"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6422, "estimated_input_tokens": 3083, "item_count": 17, "node_count": 17, "serialized_chars": 12329, "soft_pair_count": 9}, "cached_at": "2026-04-14T21:10:13Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": 
"80cbf3b7c6345fbfbb036cca55c2ae3048550754b716b6307258ffc1d55cabec", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:29127", "issue:30990", "issue:39401", "issue:41628", "issue:42371", "issue:42831", "issue:43550", "issue:43992", "issue:44261", "issue:44295", "issue:44297", "issue:44360", "issue:44373", "issue:44485", "issue:44521", "issue:44704", "issue:44987"], "result": {"analyst_result": {"best_issue_reason": "issue:44521 is the best global issue candidate by activity and inbound references, but it should be treated only as a stand-in since the surrounding issues are unrelated.", "best_pr_reason": null, "canonical_issue_reason": "issue:44521 is the most discussion-heavy open issue and the closest thing to a cluster anchor, but the overall set is heterogeneous, so this is an arbitrary representative rather than a true canonical duplicate.", "canonical_pr_reason": null, "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "issue:43992", "reason": "Both involve loading/from_pretrained, but one is a missing model weight in UMT5Encoder and the other is AutoProcessor kwargs passing to cached_file; different code paths and failures.", "right": "issue:44704"}, {"accept": false, "left": "issue:44360", "reason": "DSA indexer/ReLU and GLM-5 RoPE implementation are unrelated model internals with no shared bug mechanism.", "right": "issue:44485"}, {"accept": false, "left": "issue:29127", "reason": "One is a LayoutLMv3 box-annotation error message issue, the other is Sentence Transformers hanging on load; not the same problem.", "right": "issue:30990"}, {"accept": false, "left": "issue:44295", "reason": "Reading a position_ids buffer and a tokenizer_class mismatch in tokenizer_config.json are distinct tokenizer/model serialization bugs.", "right": "issue:44297"}, {"accept": false, "left": "issue:41628", "reason": "AutoImageProcessor import failure and a transformers>=5.1.0 model load failure are different loading issues affecting different 
APIs.", "right": "issue:44987"}, {"accept": false, "left": "issue:39401", "reason": "Wrong offset_mapping and zero assistant_masks both touch tokenization/template logic, but they affect different outputs and code paths.", "right": "issue:44521"}, {"accept": false, "left": "issue:44261", "reason": "Missing rms_norm_eps causing precision error is a runtime config bug; wrong docstring for position_ids is documentation only.", "right": "issue:44373"}, {"accept": false, "left": "issue:42371", "reason": "TF32 behavior settings and FineGrainedFP8 accuracy are separate numerical-precision concerns with different fixes.", "right": "issue:42831"}, {"accept": false, "left": "issue:43550", "reason": "torch.compile/SDPA failure in Bamba-9B-v2 is unrelated to a position_ids buffer read error.", "right": "issue:44295"}], "summary": "These items do not form a coherent duplicate cluster: they span unrelated issues in loading, tokenization, model internals, configuration, docs, and runtime behavior. None of the soft pairs look like the same underlying bug or change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43992|issue:44704", "issue:44360|issue:44485", "issue:29127|issue:30990", "issue:44295|issue:44297", "issue:41628|issue:44987", "issue:39401|issue:44521", "issue:44261|issue:44373", "issue:42371|issue:42831", "issue:43550|issue:44295"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7022, "estimated_input_tokens": 3383, "item_count": 18, "node_count": 18, "serialized_chars": 13532, "soft_pair_count": 11}, "cached_at": "2026-04-14T21:10:40Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": 
"e912f194997e7789c0a0fe328a6eba2548748aa35d9575eea8f60cac323270fa", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:36296", "issue:42222", "issue:42907", "issue:43116", "issue:43122", "issue:43278", "issue:43441", "issue:43452", "issue:43824", "issue:43927", "issue:44038", "issue:44222", "issue:44295", "issue:44297", "issue:44351", "issue:44912", "issue:44945", "issue:44977"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.88, "soft_edge_verdicts": [{"accept": false, "left": "issue:42907", "reason": "Different failure modes: saving dequantized Ministral/Devstral weights vs gguf_file breaking AutoTokenizer/AutoModelForCausalLM loading.", "right": "issue:43452"}, {"accept": false, "left": "issue:36296", "reason": "Tensor-parallel training bug and multi-label classification example returning empty results are unrelated.", "right": "issue:43116"}, {"accept": false, "left": "issue:43927", "reason": "One is custom token IDs lost on save/load; the other is position_ids access after registering as a buffer. 
Different code paths and symptoms.", "right": "issue:44295"}, {"accept": false, "left": "issue:44038", "reason": "Qwen3-VL-Moe / Transformers 5.0 compatibility is not the same bug as tokenizer_class mismatch in tokenizer_config.json.", "right": "issue:44297"}, {"accept": false, "left": "issue:43824", "reason": "Both are import errors, but for different symbols and likely different export/packaging issues.", "right": "issue:44351"}, {"accept": false, "left": "issue:44038", "reason": "Both mention MoE, but one is a model/version compatibility bug and the other is FP8 save_pretrained behavior; not the same concrete issue.", "right": "issue:44222"}, {"accept": false, "left": "issue:43122", "reason": "Tokenizer behavior changed across versions vs Qwen3.5 flash-attention generation failure; different subsystems and symptoms.", "right": "issue:44977"}, {"accept": false, "left": "issue:43278", "reason": "Embedding dtype mismatch in evaluate vs incorrect output under pipeline parallelism are distinct runtime bugs.", "right": "issue:44945"}, {"accept": false, "left": "issue:42222", "reason": "VitPose model breakage and Ministral-3 FlashAttention failure are model-specific and unrelated.", "right": "issue:43441"}, {"accept": false, "left": "issue:42907", "reason": "Both concern model saving/quantization, but dequantized save failure and FP8 save_pretrained are different concrete bugs.", "right": "issue:44222"}, {"accept": false, "left": "issue:42907", "reason": "Dequantized Ministral/Devstral save failure is not the same as MXFP4 loading falling back to bf16.", "right": "issue:44912"}], "summary": "This cluster is noisy and heterogeneous: it mixes unrelated bugs across tokenization, save/load, quantization, imports, FlashAttention, parallelism, and model-specific regressions. 
I do not see a single underlying duplicate issue or PR to canonicalize."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:42907|issue:43452", "issue:36296|issue:43116", "issue:43927|issue:44295", "issue:44038|issue:44297", "issue:43824|issue:44351", "issue:44038|issue:44222", "issue:43122|issue:44977", "issue:43278|issue:44945", "issue:42222|issue:43441", "issue:42907|issue:44222", "issue:42907|issue:44912"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6778, "estimated_input_tokens": 3261, "item_count": 18, "node_count": 18, "serialized_chars": 13041, "soft_pair_count": 9}, "cached_at": "2026-04-14T21:10:51Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "e3b952252f22f8f1d26d07d11627f0e376d8e7b6f4510498c6a5147d27cada67", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:22355", "issue:29127", "issue:38617", "issue:42371", "issue:42673", "issue:43064", "issue:43116", "issue:43493", "issue:43673", "issue:43784", "issue:43874", "issue:43937", "issue:44075", "issue:44485", "issue:44933", "issue:44936", "issue:45072", "issue:45278"], "result": {"analyst_result": {"best_issue_reason": "issue:43064 is the strongest issue candidate because it is specific, reproducible, and describes a clearly bounded code-path problem.", "best_pr_reason": null, "canonical_issue_reason": "issue:43064 is the most concrete and self-contained bug report here: it names a precise training setup (v5 + FSDP2 + PEFT + cpu_ram_efficient_loading=True) and a specific failure mode (wrong optimizer states/params on non-zero ranks).", "canonical_pr_reason": null, "confidence": 0.87, "soft_edge_verdicts": 
[{"accept": false, "left": "issue:38617", "reason": "Both are import-related, but one is a specific missing symbol and the other is a broad post-upgrade import breakage; not the same underlying bug.", "right": "issue:45278"}, {"accept": false, "left": "issue:43784", "reason": "Both mention import errors, but they concern different modules and different failures; too broad to be the same bug.", "right": "issue:44933"}, {"accept": false, "left": "issue:43064", "reason": "Training optimizer-state corruption under FSDP2/PEFT is unrelated to chunked-prefill GenerationMixin cache handling.", "right": "issue:43673"}, {"accept": false, "left": "issue:42673", "reason": "VRAM leakage in multi-threaded dataloaders and bfloat16 dtype mismatches are different runtime problems in different code paths.", "right": "issue:45072"}, {"accept": false, "left": "issue:43937", "reason": "Same model family, but one is a GenerationConfig validation issue and the other is a RoPE implementation discussion; not the same bug.", "right": "issue:44485"}, {"accept": false, "left": "issue:44075", "reason": "Optimizer argument handling and trainer evaluate-after-train failure are distinct behaviors with no clear shared code defect.", "right": "issue:44936"}, {"accept": false, "left": "issue:43116", "reason": "A multi-label example script bug and a SigLIP2 implementation discrepancy are unrelated issues.", "right": "issue:43493"}, {"accept": false, "left": "issue:42371", "reason": "TF32 API migration guidance is unrelated to a missing image processor method causing multimodal token counting failures.", "right": "issue:43874"}, {"accept": false, "left": "issue:22355", "reason": "An ONNX import failure and a LayoutLMv3 error-message clarity issue are not the same underlying bug.", "right": "issue:29127"}], "summary": "No soft pair looks like the same underlying bug/change closely enough to merge. Most pairs share only a broad area (imports, training, GLM, etc.) but describe different failures. 
I selected one highly specific, concrete issue as the canonical/best representative."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:38617|issue:45278", "issue:43784|issue:44933", "issue:43064|issue:43673", "issue:42673|issue:45072", "issue:43937|issue:44485", "issue:44075|issue:44936", "issue:43116|issue:43493", "issue:42371|issue:43874", "issue:22355|issue:29127"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6894, "estimated_input_tokens": 3319, "item_count": 18, "node_count": 18, "serialized_chars": 13273, "soft_pair_count": 10}, "cached_at": "2026-04-14T21:12:45Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "939992e3ebbb6de639a95c8b1f4598e0bb95038ac77fd99decfea0bceef4d02b", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:33453", "issue:38617", "issue:39401", "issue:41628", "issue:42673", "issue:43116", "issue:43421", "issue:43450", "issue:43825", "issue:43906", "issue:43931", "issue:43937", "issue:43986", "issue:44484", "issue:44779", "issue:44945", "issue:44977", "issue:45310"], "result": {"analyst_result": {"best_issue_reason": "39401 has a specific, reproducible symptom (wrong offset_mapping) and a narrow scope, making it the strongest standalone issue in this set.", "best_pr_reason": null, "canonical_issue_reason": "Issue 39401 is the clearest concrete bug report in the cluster, but only as a representative of the tokenizer-regression subset; the overall cluster is too mixed to treat as one duplicate set.", "canonical_pr_reason": null, "confidence": 0.84, "soft_edge_verdicts": [{"accept": false, "left": "issue:43116", "reason": "Different problems: example 
script returns empty multi-label outputs vs a pipeline() v5 warning/message issue. Same broad area, not the same bug.", "right": "issue:43825"}, {"accept": false, "left": "issue:39401", "reason": "Both are tokenizer regressions, but they affect different models and symptoms (offset_mapping vs incorrect results). Not enough evidence of one underlying defect.", "right": "issue:44779"}, {"accept": false, "left": "issue:33453", "reason": "Tokenizer loading regression and Qwen3 offset_mapping bug are distinct failure modes.", "right": "issue:39401"}, {"accept": false, "left": "issue:43931", "reason": "Model weight-shape mismatch on load is unrelated to incorrect output under pipeline parallelism.", "right": "issue:44945"}, {"accept": false, "left": "issue:43937", "reason": "GenerationConfig validation error and flash-attention generation failure are different code paths and symptoms.", "right": "issue:44977"}, {"accept": false, "left": "issue:44484", "reason": "A save_pretrained shard-size question is unrelated to a Qwen3.5 from_pretrained loading error.", "right": "issue:45310"}, {"accept": false, "left": "issue:42673", "reason": "VRAM leak in multi-thread dataloading is not shown to be the same as the isolated reproduction of issue 38071.", "right": "issue:43906"}, {"accept": false, "left": "issue:39401", "reason": "Both touch tokenizer behavior, but one is a Qwen3 offset_mapping regression and the other is runtime special-token/post-processor updates. 
Different defects.", "right": "issue:43421"}, {"accept": false, "left": "issue:43450", "reason": "Incorrect batched video shape and a crash loading video models without torchvision are separate issues.", "right": "issue:43986"}, {"accept": false, "left": "issue:38617", "reason": "Both are import errors, but they concern different missing symbols and likely different regressions.", "right": "issue:41628"}], "summary": "This cluster is mostly heterogeneous: tokenizer regressions, model-loading import errors, generation/config issues, and processor bugs. The soft pairs are only superficially similar and do not look like the same underlying defect, so no merges are justified."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43116|issue:43825", "issue:39401|issue:44779", "issue:33453|issue:39401", "issue:43931|issue:44945", "issue:43937|issue:44977", "issue:44484|issue:45310", "issue:42673|issue:43906", "issue:39401|issue:43421", "issue:43450|issue:43986", "issue:38617|issue:41628"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6628, "estimated_input_tokens": 3186, "item_count": 17, "node_count": 17, "serialized_chars": 12743, "soft_pair_count": 9}, "cached_at": "2026-04-14T21:13:33Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "512077f1cd29d809d0c3360f1e72e6b0e873c15ab5eb2e34192d387bb92e084f", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:38617", "issue:41628", "issue:42175", "issue:42222", "issue:42673", "issue:43577", "issue:43582", "issue:43784", "issue:44117", "issue:44220", "issue:44291", "issue:44410", "issue:44829", "issue:45071", "issue:45081", "issue:45216", 
"issue:45310"], "result": {"analyst_result": {"best_issue_reason": "issue:45071 is the strongest representative if one issue must anchor the cluster: it targets a core regression with broad impact and clear version scope.", "best_pr_reason": null, "canonical_issue_reason": "issue:45071 is the broadest and most central regression-style report in the set (core PretrainedConfig type checking in v5.4.0), but the overall cluster is still too heterogeneous to treat as a true duplicate group.", "canonical_pr_reason": null, "confidence": 0.91, "soft_edge_verdicts": [{"accept": false, "left": "issue:44291", "reason": "Both are v5.4.0-era loading regressions, but one is about init_empty_weights and an unexpected argument while the other is about PretrainedConfig type checking; different code paths and fixes.", "right": "issue:45071"}, {"accept": false, "left": "issue:44117", "reason": "Tokenizer mapping None handling and a Mistral regex patch crash are distinct bugs in different tokenizer paths.", "right": "issue:45081"}, {"accept": false, "left": "issue:42673", "reason": "Different models and failure modes: VRAM leak in Qwen3ForCausalLM threading vs dtype staying float32 in BLIP-2 loading.", "right": "issue:43577"}, {"accept": false, "left": "issue:44829", "reason": "FlashAttention-3 degenerate training and Qwen3.5 checkpoint save corruption are unrelated behavioral regressions.", "right": "issue:45216"}, {"accept": false, "left": "issue:44410", "reason": "Both concern Qwen3-family models, but one is missing projections in qwen3next layers while the other is a from_pretrained error for Qwen3.5 MoE; not the same bug.", "right": "issue:45310"}, {"accept": false, "left": "issue:42673", "reason": "A multithreaded Qwen3 VRAM leak is unrelated to an Apple Silicon TypeError in caching_allocator_warmup.", "right": "issue:43582"}, {"accept": false, "left": "issue:38617", "reason": "ImportError for layer_type_validation and a torch fbank extraction issue are different subsystems and 
symptoms.", "right": "issue:44220"}, {"accept": false, "left": "issue:42175", "reason": "Package extras/backend inclusion is unrelated to VitPose model breakage.", "right": "issue:42222"}, {"accept": false, "left": "issue:41628", "reason": "Both are import-time failures, but they affect different symbols and packages and point to different breakages.", "right": "issue:43784"}], "summary": "This cluster is heterogeneous: the linked issues cover unrelated import errors, model-loading regressions, tokenizer bugs, backend/package installation problems, and model-specific architecture issues. None of the soft pairs look like the same underlying bug, so no soft duplicates should be merged."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44291|issue:45071", "issue:44117|issue:45081", "issue:42673|issue:43577", "issue:44829|issue:45216", "issue:44410|issue:45310", "issue:42673|issue:43582", "issue:38617|issue:44220", "issue:42175|issue:42222", "issue:41628|issue:43784"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6586, "estimated_input_tokens": 3165, "item_count": 17, "node_count": 17, "serialized_chars": 12659, "soft_pair_count": 10}, "cached_at": "2026-04-14T21:14:10Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "969e74aaf1b8eb746a639c4e7784f6a86c244f0862e2b26d647e7f3e5b6336d2", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:42371", "issue:42617", "issue:43066", "issue:43097", "issue:43122", "issue:43408", "issue:43673", "issue:43742", "issue:43874", "issue:43937", "issue:44261", "issue:44493", "issue:44496", "issue:44933", "issue:44964", "issue:45278", "issue:45341"], 
"result": {"analyst_result": {"best_issue_reason": "45278 is the best representative solely because it is broad, open, and phrased as a general post-upgrade failure rather than a narrow model-specific symptom.", "best_pr_reason": null, "canonical_issue_reason": "Issue 45278 is the broadest open report and best fits the general upgrade/regression theme, but the cluster is too mixed to treat it as a true duplicate hub.", "canonical_pr_reason": null, "confidence": 0.56, "soft_edge_verdicts": [{"accept": false, "left": "issue:42371", "reason": "TF32 settings/API guidance is unrelated to MLA q_a_layernorm missing rms_norm_eps and the precision mismatch it causes.", "right": "issue:44261"}, {"accept": false, "left": "issue:44933", "reason": "Both mention imports, but 44933 is a specific missing symbol while 45278 is a broad upgrade-related import-error report; not the same bug.", "right": "issue:45278"}, {"accept": false, "left": "issue:43937", "reason": "GenerationConfig validation for GLM-5 and an unrecognized-model/config.json model_type error are different loading failures with different root causes.", "right": "issue:44496"}, {"accept": false, "left": "issue:43408", "reason": "A model-type warning for sam3_video vs sam3_tracker is unrelated to the MobileLLM key error; different models and error paths.", "right": "issue:43742"}, {"accept": false, "left": "issue:42617", "reason": "Running 3d_parallel.py and a bug in testing_utils.py are unrelated to the same underlying code-path.", "right": "issue:45341"}, {"accept": false, "left": "issue:43066", "reason": "Both are tokenizer-related regressions, but one is a decoder-type mismatch and the other is changed tokenization output; not clearly the same concrete defect.", "right": "issue:43122"}, {"accept": false, "left": "issue:44964", "reason": "Phi-4 multimodal load failure is a model-specific loading issue, not the same as the general import-error regression in 45278.", "right": "issue:45278"}, {"accept": false, "left": 
"issue:43673", "reason": "Chunked-prefill cache missing in GenerationMixin and a testing_utils bug are unrelated.", "right": "issue:45341"}, {"accept": false, "left": "issue:43097", "reason": "Removal of tie_embeddings_and_encoder_decoder and unexpected position-id keys are different API/config regressions.", "right": "issue:44493"}, {"accept": false, "left": "issue:43874", "reason": "Missing get_number_of_image_patches in an image processor and absent rms_norm_eps in MLA q_a_layernorm are different multimodal/config issues.", "right": "issue:44261"}], "summary": "The items are mostly a heterogeneous mix of Transformers v5 regressions (imports, tokenizer changes, model loading, multimodal/config errors) rather than one duplicate bug cluster. No soft edge pair looks like the same concrete underlying issue."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:42371|issue:44261", "issue:44933|issue:45278", "issue:43937|issue:44496", "issue:43408|issue:43742", "issue:42617|issue:45341", "issue:43066|issue:43122", "issue:44964|issue:45278", "issue:43673|issue:45341", "issue:43097|issue:44493", "issue:43874|issue:44261"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6746, "estimated_input_tokens": 3245, "item_count": 18, "node_count": 18, "serialized_chars": 12979, "soft_pair_count": 9}, "cached_at": "2026-04-14T21:14:41Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "b7cf783bce4066135239e7e7676135f6a39a7b2b936b3952fb6c4d2abca9d8ab", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:41628", "issue:42222", "issue:42994", "issue:43012", "issue:43097", "issue:43408", "issue:43441", 
"issue:43452", "issue:43784", "issue:43976", "issue:43986", "issue:44263", "issue:44484", "issue:44485", "issue:44829", "issue:45003", "issue:45071", "issue:45341"], "result": {"analyst_result": {"best_issue_reason": "issue:45003 is the most suitable single representative of the cluster because it targets a foundational code path and is actionable as an unresolved bug.", "best_pr_reason": null, "canonical_issue_reason": "issue:45003 is the strongest representative because it is an open, core-runtime bug in modeling_utils with broader impact than the others, and it has meaningful discussion activity.", "canonical_pr_reason": null, "confidence": 0.93, "soft_edge_verdicts": [{"accept": false, "left": "issue:43986", "reason": "Different bugs: one is an AutoProcessor/video-model dependency crash, the other is a save_pretrained shard-size question.", "right": "issue:44484"}, {"accept": false, "left": "issue:43441", "reason": "Both mention FlashAttention, but one is a specific Ministral-3 failure and the other is a training degeneration issue for sequence classification; not the same concrete bug.", "right": "issue:44829"}, {"accept": false, "left": "issue:42222", "reason": "Unrelated: vitpose model loading/regression versus quantized model saving failure.", "right": "issue:42994"}, {"accept": false, "left": "issue:43012", "reason": "Both are warning reports, but they concern different model/type combinations and different code paths.", "right": "issue:43408"}, {"accept": false, "left": "issue:44263", "reason": "Different topics: a torch.split return-value issue in one model component versus a RoPE implementation discussion.", "right": "issue:44485"}, {"accept": false, "left": "issue:41628", "reason": "Import error for AutoImageProcessor is unrelated to PretrainedConfig type-checking breakage.", "right": "issue:45071"}, {"accept": false, "left": "issue:45003", "reason": "Both are utility-level bugs, but one is unsafe sys.modules access in modeling_utils and the other is a 
testing_utils bug; not the same defect.", "right": "issue:45341"}, {"accept": false, "left": "issue:43784", "reason": "A sentence-transformers import NameError is not the same as a Python-version compatibility regression.", "right": "issue:43976"}, {"accept": false, "left": "issue:43097", "reason": "Removing tie_embeddings_and_encoder_decoder and gguf_file.from_pretrained breakage are separate regressions with different failure modes.", "right": "issue:43452"}], "summary": "This cluster is a loose mix of unrelated Transformers issues with only superficial title similarity. None of the soft pairs look like the same underlying bug or change, so all suggested merges should be rejected."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43986|issue:44484", "issue:43441|issue:44829", "issue:42222|issue:42994", "issue:43012|issue:43408", "issue:44263|issue:44485", "issue:41628|issue:45071", "issue:45003|issue:45341", "issue:43784|issue:43976", "issue:43097|issue:43452"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7052, "estimated_input_tokens": 3398, "item_count": 18, "node_count": 18, "serialized_chars": 13589, "soft_pair_count": 10}, "cached_at": "2026-04-14T21:15:13Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "086608f88dee883237a21defa30abfb9cc955dd8e1238674046e4449a4555f6a", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:42222", "issue:43335", "issue:43408", "issue:43874", "issue:43931", "issue:43986", "issue:44038", "issue:44117", "issue:44263", "issue:44368", "issue:44410", "issue:44496", "issue:44829", "issue:44987", "issue:45071", "issue:45245", "issue:45341", 
"issue:45372"], "result": {"analyst_result": {"best_issue_reason": "issue:43986 is the strongest standalone report to anchor triage because it has a clear reproducible crash path and multiple inbound references, but it should not absorb the other issues.", "best_pr_reason": null, "canonical_issue_reason": "No single canonical issue fits this set; the reports span unrelated bugs rather than one duplicate problem.", "canonical_pr_reason": null, "confidence": 0.94, "soft_edge_verdicts": [{"accept": false, "left": "issue:42222", "reason": "Different model families and failures: vitpose is reported broken, while Olmo-Hybrid-Instruct-SFT-7B is an unrecognized-model/config.json problem.", "right": "issue:44496"}, {"accept": false, "left": "issue:44117", "reason": "Both mention loading behavior, but one is a tokenizer mapping None bug and the other is a model-loading failure for a specific repo; not the same code path.", "right": "issue:44987"}, {"accept": false, "left": "issue:43931", "reason": "Both are Qwen-related, but one is a weight-shape mismatch during loading and the other is missing projections in layer 0 of qwen3next; distinct bugs.", "right": "issue:44410"}, {"accept": false, "left": "issue:43986", "reason": "The first is a video-model AutoProcessor/torchvision dependency crash; the second is a testing_utils bug. 
No common underlying defect.", "right": "issue:45341"}, {"accept": false, "left": "issue:43874", "reason": "Both touch GLM/MoE areas, but one is a missing processor method causing an AttributeError and the other is torch.split return-value handling in a different component.", "right": "issue:44263"}, {"accept": false, "left": "issue:43408", "reason": "These are unrelated warnings: sam3 tracker/video model mismatch versus tie_word_embeddings config warning for Qwen3.5 fine-tuning.", "right": "issue:44368"}, {"accept": false, "left": "issue:43335", "reason": "SwitchTransformers sparse-layer construction and PretrainedConfig type checking are separate problems in different subsystems.", "right": "issue:45071"}, {"accept": false, "left": "issue:43986", "reason": "One is missing torchvision for video processor loading; the other is an import error for ReasoningEffort affecting Gemma 4 processor loading. Different dependencies and failure modes.", "right": "issue:45372"}, {"accept": false, "left": "issue:43931", "reason": "Both are Qwen3-VL variants, but one is a shape mismatch on load and the other concerns transformers 5.0 behavior with Qwen3-VL-Moe; not mergeable as the same bug.", "right": "issue:44038"}, {"accept": false, "left": "issue:44829", "reason": "Flash-attention training degeneration and the 2^24 categories runtime limit are unrelated issues despite both being runtime failures.", "right": "issue:45245"}], "summary": "This cluster is heterogeneous: the items are separate bug reports across different models, configs, and utilities, with only superficial similarity in a few titles. 
I would not merge any of the soft edges."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:42222|issue:44496", "issue:44117|issue:44987", "issue:43931|issue:44410", "issue:43986|issue:45341", "issue:43874|issue:44263", "issue:43408|issue:44368", "issue:43335|issue:45071", "issue:43986|issue:45372", "issue:43931|issue:44038", "issue:44829|issue:45245"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6518, "estimated_input_tokens": 3131, "item_count": 17, "node_count": 17, "serialized_chars": 12523, "soft_pair_count": 9}, "cached_at": "2026-04-14T21:15:28Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "48bbcf175243f65b6fb7b913d5aeb155672ea50aafb06b756fb45cc64b166b82", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:38617", "issue:43064", "issue:43531", "issue:43976", "issue:44062", "issue:44117", "issue:44261", "issue:44315", "issue:44351", "issue:44410", "issue:44464", "issue:44485", "issue:44704", "issue:45071", "issue:45103", "issue:45341", "issue:45375"], "result": {"analyst_result": {"best_issue_reason": "No issue is a strong global representative; the overlap in wording is superficial rather than a shared root cause.", "best_pr_reason": null, "canonical_issue_reason": "No single issue is a valid canonical for this cluster because the items describe different underlying bugs in different subsystems and model families.", "canonical_pr_reason": null, "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "issue:43976", "reason": "Both are version/regression reports, but one is a Python compatibility failure and the other is a PretrainedConfig type-checking 
break; different root causes and fixes.", "right": "issue:45071"}, {"accept": false, "left": "issue:43064", "reason": "Completely different areas: optimizer state handling for FSDP2/PEFT versus Liger Kernel application with model_init.", "right": "issue:44315"}, {"accept": false, "left": "issue:44464", "reason": "Chunked generation/compiled forward inconsistency is unrelated to PretrainedConfig typing.", "right": "issue:45071"}, {"accept": false, "left": "issue:43531", "reason": "Qwen3-MoE sliding_window behavior and a testing_utils bug are unrelated.", "right": "issue:45341"}, {"accept": false, "left": "issue:38617", "reason": "Both are import errors, but they concern different missing symbols in different modules with no shared code path evident.", "right": "issue:44351"}, {"accept": false, "left": "issue:44261", "reason": "Different model-specific implementation issues: MLA q_a_layernorm epsilon/config handling versus GLM-5 RoPE logic.", "right": "issue:44485"}, {"accept": false, "left": "issue:44704", "reason": "AutoProcessor cached_file kwargs propagation is unrelated to an auto_docstring AttributeError from postponed annotations.", "right": "issue:45103"}, {"accept": false, "left": "issue:44410", "reason": "Both mention missing fields, but one is a Qwen3-next architecture projection bug and the other is a config field being dropped by strict typing; different concrete failures.", "right": "issue:45375"}, {"accept": false, "left": "issue:44062", "reason": "Tokenizer AddedToken keyword duplication and TOKENIZER_MAPPING_NAMES returning None are different tokenizer initialization bugs.", "right": "issue:44117"}], "summary": "This cluster is heterogeneous: the issues cover unrelated import errors, tokenizer/config regressions, model-specific bugs, training bugs, and docs/annotation tooling issues. 
None of the soft pairs look like true duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43976|issue:45071", "issue:43064|issue:44315", "issue:44464|issue:45071", "issue:43531|issue:45341", "issue:38617|issue:44351", "issue:44261|issue:44485", "issue:44704|issue:45103", "issue:44410|issue:45375", "issue:44062|issue:44117"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6608, "estimated_input_tokens": 3176, "item_count": 17, "node_count": 17, "serialized_chars": 12702, "soft_pair_count": 10}, "cached_at": "2026-04-14T21:16:06Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "49e633aa9d2467f218b63c178e72d7b8e80b9db160e7a5af4970099f74199975", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:38617", "issue:43066", "issue:43097", "issue:43502", "issue:43582", "issue:43824", "issue:43873", "issue:43906", "issue:43957", "issue:43986", "issue:43992", "issue:44038", "issue:44117", "issue:44351", "issue:44410", "issue:44484", "issue:45341"], "result": {"analyst_result": {"best_issue_reason": "43502 is the clearest standalone regression report: the symptom is specific ('local_files_only=True' still triggers API calls), the impact is broad, and it is the most actionable candidate for a duplicate anchor if similar reports appear.", "best_pr_reason": null, "canonical_issue_reason": "No single canonical issue stands out; the items cover distinct bugs and regressions across imports, loading, quantization/offloading, and model-specific breakages.", "canonical_pr_reason": null, "confidence": 0.94, "soft_edge_verdicts": [{"accept": false, "left": "issue:43502", "reason": "Different 
failures: network calls despite local_files_only vs a missing embed_tokens.weight during UMT5Encoder loading.", "right": "issue:43992"}, {"accept": false, "left": "issue:43986", "reason": "One is an AutoProcessor crash without torchvision; the other is a tokenizer mapping None-handling bug. Related loading area, but not the same defect.", "right": "issue:44117"}, {"accept": false, "left": "issue:38617", "reason": "Both are v5 breakage reports, but they concern different removed symbols/APIs and different import paths.", "right": "issue:43097"}, {"accept": false, "left": "issue:44038", "reason": "Different model families and symptoms; Qwen3-VL-Moe v5 breakage is not the same as missing qkv/gate projections in qwen3next layer 0.", "right": "issue:44410"}, {"accept": false, "left": "issue:43582", "reason": "No meaningful overlap beyond being bug reports; Apple Silicon TypeError in caching_allocator_warmup is unrelated to a testing_utils issue.", "right": "issue:45341"}, {"accept": false, "left": "issue:43066", "reason": "43906 is an isolated reproduction of another upstream issue, not the same as the 'wrong tokenizer decoder type' report.", "right": "issue:43906"}, {"accept": false, "left": "issue:43957", "reason": "Meta-device loading failures and a HybridCache import error are separate code paths with different symptoms and likely fixes.", "right": "issue:44351"}, {"accept": false, "left": "issue:43824", "reason": "Missing Qwen2_5_VLForConditionalGeneration export is a model registration/import issue, while the other is a processor crash due to missing torchvision.", "right": "issue:43986"}, {"accept": false, "left": "issue:43992", "reason": "A missing embed_tokens.weight during load is unrelated to the max_shard_size default question in save_pretrained().", "right": "issue:44484"}, {"accept": false, "left": "issue:43097", "reason": "Removed tie_embeddings_and_encoder_decoder and quantization/offloading behavior are different problems in different parts of the 
stack.", "right": "issue:43873"}], "summary": "Mostly unrelated Transformers bug reports. The soft pairs share broad version/loading/theme similarity, but not the same underlying defect or fix path, so none should be merged as duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43502|issue:43992", "issue:43986|issue:44117", "issue:38617|issue:43097", "issue:44038|issue:44410", "issue:43582|issue:45341", "issue:43066|issue:43906", "issue:43957|issue:44351", "issue:43824|issue:43986", "issue:43992|issue:44484", "issue:43097|issue:43873"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6960, "estimated_input_tokens": 3352, "item_count": 18, "node_count": 18, "serialized_chars": 13406, "soft_pair_count": 10}, "cached_at": "2026-04-14T21:16:22Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "6830af64cbe7d56c8ea0aaf7941173b7a7a861fe09752291efa08367006f37ac", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:42886", "issue:42907", "issue:43012", "issue:43097", "issue:43299", "issue:43352", "issue:43381", "issue:43644", "issue:43825", "issue:43901", "issue:43937", "issue:44368", "issue:44380", "issue:44410", "issue:44484", "issue:44655", "issue:44792", "issue:44829"], "result": {"analyst_result": {"best_issue_reason": "No best issue candidate: the cluster is not a duplicate set, so selecting one issue as canonical would be misleading.", "best_pr_reason": null, "canonical_issue_reason": "No single canonical issue fits this cluster; the items are not duplicates of one another and cover distinct subsystems and failure modes.", "canonical_pr_reason": null, "confidence": 0.95, 
"soft_edge_verdicts": [{"accept": false, "left": "issue:43012", "reason": "Both involve training/runtime warnings, but one is a PyTorch precision warning for bfloat16 compilation while the other is a degenerate-training regression with flash_attention_3; different symptoms and code paths.", "right": "issue:44829"}, {"accept": false, "left": "issue:43299", "reason": "Both mention Qwen models, but one is a Qwen3VL MoE loading failure and the other is missing attention projections in qwen3next layer 0; different model families and defects.", "right": "issue:44410"}, {"accept": false, "left": "issue:43352", "reason": "Unsupported Flash Attention 2 for a specific model is not the same problem as GPT2 attention scaling being ignored under SDPA/FlashAttention; backend and failure mode differ.", "right": "issue:44380"}, {"accept": false, "left": "issue:43644", "reason": "One reports junk in non-persistent buffers during Transformers 5.0.0 loading, the other asks about the default max_shard_size in save_pretrained; unrelated serialization topics.", "right": "issue:44484"}, {"accept": false, "left": "issue:43825", "reason": "The first is a bad pipeline() error message about translation tasks in v5, while the second is inability to save Pipeline objects with save_pretrained; same broad area, but different bugs.", "right": "issue:44655"}, {"accept": false, "left": "issue:43097", "reason": "One concerns a removed config field in 5.0.0, the other a docs/behavior mismatch for return_all_scores in text classification; distinct API/documentation issues.", "right": "issue:43901"}, {"accept": false, "left": "issue:43012", "reason": "Both are warning-related, but one is a float32 precision warning when compiling a bfloat16 model and the other is a tie_word_embeddings warning during LoRA fine-tuning; not the same bug.", "right": "issue:44368"}, {"accept": false, "left": "issue:42886", "reason": "Tokenizer offline cache loading and gradient checkpointing in eval mode are unrelated 
issues affecting different parts of the stack.", "right": "issue:43381"}, {"accept": false, "left": "issue:43937", "reason": "A GLM-5 GenerationConfig validation failure and a Janus image-generation test failure involve different models and different failure conditions.", "right": "issue:44792"}, {"accept": false, "left": "issue:42907", "reason": "Both mention saving, but one is about saving dequantized Ministrals/Devstrals and the other about saving Pipeline objects; different object types and likely different fixes.", "right": "issue:44655"}], "summary": "These items are a heterogeneous collection of unrelated Transformers issues spanning cache/offline loading, save_pretrained/pipeline serialization, model-specific loading regressions, attention backend behavior, documentation mismatches, and assorted warning/reporting changes. None of the suggested soft pairs look like the same underlying bug or change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43012|issue:44829", "issue:43299|issue:44410", "issue:43352|issue:44380", "issue:43644|issue:44484", "issue:43825|issue:44655", "issue:43097|issue:43901", "issue:43012|issue:44368", "issue:42886|issue:43381", "issue:43937|issue:44792", "issue:42907|issue:44655"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7130, "estimated_input_tokens": 3437, "item_count": 18, "node_count": 18, "serialized_chars": 13746, "soft_pair_count": 11}, "cached_at": "2026-04-14T21:17:06Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "24ae86765c4afbb99afcb0fc051fcef4f763c20c3c7a7b7fa3750eafe343d910", "prepared_review_unit_schema_version": "1.0"}, "nodes": 
["issue:36683", "issue:43012", "issue:43064", "issue:43208", "issue:43352", "issue:43525", "issue:43526", "issue:43550", "issue:43582", "issue:43618", "issue:43761", "issue:43976", "issue:44117", "issue:44380", "issue:44483", "issue:44514", "issue:44683", "issue:45290"], "result": {"analyst_result": {"best_issue_reason": "Issue 43761 is the clearest triage target in this batch: it has a concrete regression claim, an explicit repro condition, and aligns with the only accepted near-duplicate.", "best_pr_reason": null, "canonical_issue_reason": "No broad canonical across the full set, but issue 43761 is the strongest representative of the CLIP regression cluster because it clearly states the v5-forward regression and affected output path.", "canonical_pr_reason": null, "confidence": 0.87, "soft_edge_verdicts": [{"accept": false, "left": "issue:43525", "reason": "Different bugs: one is a missing config attribute in Llama4Config, the other is tokenizer mapping unexpectedly returning None in from_pretrained.", "right": "issue:44117"}, {"accept": false, "left": "issue:43012", "reason": "Both mention compilation, but one is a bfloat16 precision warning while the other is a concrete torch.compile + SDPA failure for Bamba; not the same underlying bug.", "right": "issue:43550"}, {"accept": false, "left": "issue:43352", "reason": "Unrelated failures: Flash Attention support for Nemotron vs a Python compatibility issue in Transformers 5.1.0.", "right": "issue:43976"}, {"accept": false, "left": "issue:43208", "reason": "Completely different subsystems and symptoms: xLSTM training bugs versus a BeitImageProcessorFast label reduction bug.", "right": "issue:43526"}, {"accept": false, "left": "issue:43064", "reason": "FSDP2/PEFT optimizer-state corruption is unrelated to GPT2 attention scaling being ignored under SDPA/FlashAttention.", "right": "issue:44380"}, {"accept": false, "left": "issue:43618", "reason": "CLIP output assignment regression versus GPT2 attention scaling; 
different models and different failure modes.", "right": "issue:44380"}, {"accept": false, "left": "issue:44483", "reason": "One is an OpenAI-style chat completions endpoint compatibility issue; the other is a Qwen2_5_VL chat template crash on batched tool-call input.", "right": "issue:44514"}, {"accept": false, "left": "issue:36683", "reason": "No overlap: Gemma3Config missing vocab_size versus an Apple Silicon TypeError in caching_allocator_warmup.", "right": "issue:43582"}, {"accept": true, "left": "issue:43618", "reason": "Both describe the same CLIP v5 regression where optional vision outputs are no longer populated in the model output path.", "right": "issue:43761"}, {"accept": false, "left": "issue:43012", "reason": "Different compilation problems: a float32 precision warning for bfloat16 models versus a flex_attention failure on torch >= 2.9.", "right": "issue:44683"}, {"accept": false, "left": "issue:44483", "reason": "Both involve chat APIs/templates, but one is an endpoint request rejection and the other is a tokenizer/template crash on assistant tool-call messages.", "right": "issue:45290"}], "summary": "The set is mostly unrelated. 
The only plausible duplicate pair is the CLIP vision regression around missing optional outputs; all other soft pairs describe different code paths or symptoms."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43525|issue:44117", "issue:43012|issue:43550", "issue:43352|issue:43976", "issue:43208|issue:43526", "issue:43064|issue:44380", "issue:43618|issue:44380", "issue:44483|issue:44514", "issue:36683|issue:43582", "issue:43618|issue:43761", "issue:43012|issue:44683", "issue:44483|issue:45290"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6582, "estimated_input_tokens": 3163, "item_count": 17, "node_count": 17, "serialized_chars": 12651, "soft_pair_count": 10}, "cached_at": "2026-04-14T21:17:34Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "7c8586ca1afcd0598e69a5aaa5fafd7f7c32b18e76b896ceb6659fd72b0436ab", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:36683", "issue:38617", "issue:42994", "issue:43065", "issue:43299", "issue:43475", "issue:43502", "issue:43756", "issue:43867", "issue:43901", "issue:44038", "issue:44230", "issue:44483", "issue:44485", "issue:44655", "issue:45216", "issue:45381"], "result": {"analyst_result": {"best_issue_reason": "issue:43502 is the most representative and maintainable issue to anchor the cluster around: specific symptom, general utility, and the highest engagement among the candidates.", "best_pr_reason": null, "canonical_issue_reason": "issue:43502 is the clearest broadly scoped bug report in the set, with the strongest discussion signal and a concrete, actionable failure mode.", "canonical_pr_reason": null, "confidence": 0.94, 
"soft_edge_verdicts": [{"accept": false, "left": "issue:43756", "reason": "Both mention RoPE, but they concern different models and different failure modes (Smollm3 layer dropping vs GLM-5 RoPE implementation).", "right": "issue:44485"}, {"accept": false, "left": "issue:38617", "reason": "ImportError from configuration_utils and unwanted network requests with local_files_only=True are unrelated code paths and symptoms.", "right": "issue:43502"}, {"accept": false, "left": "issue:44483", "reason": "One is a chat-completions request validation problem; the other is a Qwen2.5-VL video position_ids bug. Different API surface and model logic.", "right": "issue:45381"}, {"accept": false, "left": "issue:43299", "reason": "Different loading bugs: Qwen3VL-Moe regression on v5.0.0.dev0 versus a state_dict ordering issue. Same broad area, not the same defect.", "right": "issue:43867"}, {"accept": false, "left": "issue:44038", "reason": "One reports broken loading on transformers 5.0 for Qwen3-VL-Moe; the other is a request for fp8 inference support. Not the same change or bug.", "right": "issue:44230"}, {"accept": false, "left": "issue:43901", "reason": "Docs mismatch for return_all_scores is unrelated to save_pretrained failing on Pipeline objects.", "right": "issue:44655"}, {"accept": false, "left": "issue:42994", "reason": "Both involve saving, but one is quantized-model saving and the other is Pipeline.save_pretrained. 
Different underlying problems.", "right": "issue:44655"}, {"accept": false, "left": "issue:36683", "reason": "Both are missing-attribute errors, but they affect different model classes and different attributes, so they are not the same bug.", "right": "issue:43475"}, {"accept": false, "left": "issue:43065", "reason": "Sam3PixelDecoder dummy Conv2d discussion is unrelated to the /v1/chat/completions request acceptance bug.", "right": "issue:44483"}, {"accept": false, "left": "issue:43867", "reason": "Both touch save/load behavior, but one is a generic state_dict sorting error and the other is a Qwen3.5 save_pretrained regression; the evidence is too weak to treat as one defect.", "right": "issue:45216"}], "summary": "Mostly heterogeneous issues with only superficial overlap around model loading/saving and RoPE/model-specific regressions; none of the soft pairs look like the same underlying bug."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43756|issue:44485", "issue:38617|issue:43502", "issue:44483|issue:45381", "issue:43299|issue:43867", "issue:44038|issue:44230", "issue:43901|issue:44655", "issue:42994|issue:44655", "issue:36683|issue:43475", "issue:43065|issue:44483", "issue:43867|issue:45216"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6866, "estimated_input_tokens": 3305, "item_count": 18, "node_count": 18, "serialized_chars": 13217, "soft_pair_count": 10}, "cached_at": "2026-04-14T21:18:03Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "447cb23f6c7e2fabc93420e22ab5b7584e341a1f1bb3a8fe0cf5b554a6dc3037", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:36683", 
"issue:43064", "issue:43066", "issue:43352", "issue:43525", "issue:43937", "issue:43976", "issue:43992", "issue:44077", "issue:44220", "issue:44263", "issue:44380", "issue:44492", "issue:44655", "issue:44938", "issue:45310", "issue:45356", "issue:45440"], "result": {"analyst_result": {"best_issue_reason": "Issue 45440 is the most substantive and generally useful report in the set: it points to a native-vs-remote implementation divergence in DeepseekV3MoE, which is more representative of a real code-path defect than the narrower config/tokenizer/version-specific reports.", "best_pr_reason": null, "canonical_issue_reason": "No single issue is a true canonical for this cluster because the items describe separate bugs across different model families and subsystems. If forced, issue 45440 is the broadest implementation-regression report, but it does not subsume the others.", "canonical_pr_reason": null, "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "issue:43352", "reason": "Different models and failure modes: Flash Attention support gating for Nemotron vs invalid GenerationConfig for GLM-5. 
Same broad runtime category, but not the same bug.", "right": "issue:43937"}, {"accept": false, "left": "issue:44220", "reason": "Unrelated code paths: `_torch_extract_fbank_features()` in audio preprocessing vs `save_pretrained` for Pipeline serialization.", "right": "issue:44655"}, {"accept": false, "left": "issue:44263", "reason": "Different model internals and defects: `torch.split` return handling in GlmMoeDsaIndexer vs DeepseekV3MoE diverging from the remote implementation.", "right": "issue:45440"}, {"accept": false, "left": "issue:43066", "reason": "Both are tokenizer-related regressions, but they concern different concrete problems: wrong decoder type vs codec handling / warning behavior in Kimi-K2.5.", "right": "issue:45356"}, {"accept": false, "left": "issue:36683", "reason": "A missing config attribute on Gemma3Config is not the same bug as patchtsmixer's `post_init` handling.", "right": "issue:44077"}, {"accept": false, "left": "issue:43064", "reason": "Training/FSDP optimizer-state corruption is unrelated to a typo in cache strategy text.", "right": "issue:44492"}, {"accept": false, "left": "issue:43992", "reason": "Different model-loading failures for different architectures: missing `embed_tokens.weight` in UMT5Encoder vs Qwen3.5 MoE `from_pretrained` error.", "right": "issue:45310"}, {"accept": false, "left": "issue:44077", "reason": "Distinct issues in different subsystems: `post_init` policy for patchtsmixer vs GPT2 attention scaling being ignored under SDPA/FlashAttention.", "right": "issue:44380"}, {"accept": false, "left": "issue:36683", "reason": "Both are config attribute errors, but for different models and missing fields (`vocab_size` vs `pad_token_id`), so they are not the same underlying defect.", "right": "issue:43525"}, {"accept": false, "left": "issue:43976", "reason": "Separate Python-version compatibility regressions with different symptoms and likely different root causes.", "right": "issue:44938"}], "summary": "The cluster is a 
loose collection of unrelated Transformers issues spanning different models, tokenizers, training/runtime compatibility, and config/load-path bugs. The soft pairs share only broad theme similarity, not the same concrete defect, so none should be merged."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43352|issue:43937", "issue:44220|issue:44655", "issue:44263|issue:45440", "issue:43066|issue:45356", "issue:36683|issue:44077", "issue:43064|issue:44492", "issue:43992|issue:45310", "issue:44077|issue:44380", "issue:36683|issue:43525", "issue:43976|issue:44938"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7058, "estimated_input_tokens": 3401, "item_count": 18, "node_count": 18, "serialized_chars": 13604, "soft_pair_count": 11}, "cached_at": "2026-04-14T21:21:14Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "2e69019716950490aa562679280606944f76093391a0902290374425dae1c95a", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:36683", "issue:41669", "issue:43010", "issue:43064", "issue:43352", "issue:43441", "issue:43618", "issue:43867", "issue:43931", "issue:44188", "issue:44242", "issue:44246", "issue:44380", "issue:44938", "issue:45071", "issue:45216", "issue:45278", "issue:45375"], "result": {"analyst_result": {"best_issue_reason": "No issue is a strong global representative for deduping; at best the cluster loosely spans import/load/regression themes, but not one underlying defect.", "best_pr_reason": "No pull requests are present in this cluster, so there is no best PR.", "canonical_issue_reason": "No single canonical issue is appropriate: the candidate pairs are 
different bugs/regressions across unrelated code paths, so selecting one anchor would be arbitrary.", "canonical_pr_reason": "No pull requests are present in this cluster, so there is no canonical PR.", "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "issue:44188", "reason": "Both involve attention backends, but one is a torch.compile branching bug and the other is GPT2 attention scaling being ignored; different code paths and fixes.", "right": "issue:44380"}, {"accept": false, "left": "issue:43618", "reason": "CLIP attentions missing from output and Qwen3.5 save_pretrained checkpoint corruption are unrelated model-output vs serialization regressions.", "right": "issue:45216"}, {"accept": false, "left": "issue:36683", "reason": "Gemma3Config missing vocab_size and MoE load-balancing loss omission are unrelated config attribute vs loss-computation bugs.", "right": "issue:44242"}, {"accept": false, "left": "issue:43010", "reason": "A no_grad decorator on cache/layer update is unrelated to broad import failures after upgrading versions.", "right": "issue:45278"}, {"accept": false, "left": "issue:43441", "reason": "Both mention FlashAttention/backends, but Ministral-3 FA failure and GPT2 attention scaling being ignored are different model-specific bugs.", "right": "issue:44380"}, {"accept": false, "left": "issue:41669", "reason": "Both concern import performance, but one is import * usage in model modules and the other is intermittent slow import of transformers; not the same concrete defect.", "right": "issue:44246"}, {"accept": false, "left": "issue:36683", "reason": "Gemma3Config missing vocab_size and NemotronHForCausalLM lacking Flash Attention 2.0 support are different model API issues.", "right": "issue:43352"}, {"accept": false, "left": "issue:43352", "reason": "FlashAttention 2.0 unsupported for Nemotron vs Qwen3-VL model-loading shape mismatch are unrelated failures.", "right": "issue:43931"}, {"accept": false, "left": "issue:43867", 
"reason": "Sorted state_dict load failure and strict-dropping of a missing config field are both loading-related, but they are different mechanisms and fixes.", "right": "issue:45375"}, {"accept": false, "left": "issue:36683", "reason": "A missing config attribute and a Trainer FSDP2/PEFT optimizer-state rank bug are unrelated issues.", "right": "issue:43064"}, {"accept": false, "left": "issue:44938", "reason": "Python 3.14 import failure and v5.4.0 PretrainedConfig type-checking regression are both compatibility issues, but not the same root cause.", "right": "issue:45071"}], "summary": "The set is heterogeneous. The soft pairs mostly share only broad subsystem keywords (imports, loading, attention, configs) but each describes a different concrete failure mode, so none are safe duplicate merges."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44188|issue:44380", "issue:43618|issue:45216", "issue:36683|issue:44242", "issue:43010|issue:45278", "issue:43441|issue:44380", "issue:41669|issue:44246", "issue:36683|issue:43352", "issue:43352|issue:43931", "issue:43867|issue:45375", "issue:36683|issue:43064", "issue:44938|issue:45071"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6842, "estimated_input_tokens": 3293, "item_count": 18, "node_count": 18, "serialized_chars": 13172, "soft_pair_count": 10}, "cached_at": "2026-04-14T21:21:54Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "23db93b11b3f54e3803dbf2c06f03d40cb568c93bb633dd03114c93795af5d11", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:36296", "issue:38617", "issue:41669", "issue:43010", "issue:43575", 
"issue:43618", "issue:43644", "issue:43867", "issue:43976", "issue:43992", "issue:44077", "issue:44360", "issue:44484", "issue:44554", "issue:44938", "issue:45357", "issue:45381", "issue:45440"], "result": {"analyst_result": {"best_issue_reason": "Issue 45440 is the best representative anchor only in the sense of having the clearest active signal and concrete model-divergence impact, but it does not subsume the other issues.", "best_pr_reason": null, "canonical_issue_reason": "No single canonical issue is evident because the set spans distinct bugs; if one anchor is needed, issue 45440 is the strongest current report due to being open, specific, and already referenced.", "canonical_pr_reason": null, "confidence": 0.88, "soft_edge_verdicts": [{"accept": false, "left": "issue:43867", "reason": "Both concern loading/saving state, but one is about sorted state_dict load failures and the other about wrong visual encoder keys in save_pretrained; different failure modes and code paths.", "right": "issue:45357"}, {"accept": false, "left": "issue:43010", "reason": "One is a no_grad decoration proposal for cache/layer updates; the other is a Python version compatibility/load failure. 
No shared underlying bug.", "right": "issue:43976"}, {"accept": false, "left": "issue:44360", "reason": "DSA ReLU indexing discussion is unrelated to DeepseekV3MoE divergence from remote implementation; same broad area, different concrete problem.", "right": "issue:45440"}, {"accept": false, "left": "issue:44938", "reason": "Python 3.14 import/load failure and qwen2.5-vl video vision_position_ids bug are separate issues with different symptoms and fixes.", "right": "issue:45381"}, {"accept": false, "left": "issue:43618", "reason": "CLIPOutput attentions regression and patchtsmixer post_init allowance are unrelated model API bugs.", "right": "issue:44077"}, {"accept": false, "left": "issue:41669", "reason": "Import-star performance cleanup is not the same as the MPS attention correctness issue; one is refactoring/perf, the other is backend numerical correctness.", "right": "issue:44554"}, {"accept": false, "left": "issue:43644", "reason": "Non-persistent buffer junk initialization and Python 3.14 loading failure are different root causes and fixes.", "right": "issue:44938"}, {"accept": false, "left": "issue:43867", "reason": "State_dict load error when sorted and max_shard_size default discussion are not the same bug; one is loading behavior, the other is a save_pretrained size policy question.", "right": "issue:44484"}, {"accept": false, "left": "issue:38617", "reason": "ImportError for layer_type_validation and missing embed_tokens.weight in UMT5Encoder.from_pretrained are separate loading regressions.", "right": "issue:43992"}, {"accept": false, "left": "issue:36296", "reason": "Both mention tensor parallelism, but one is a generic training bug and the other is an OOM when loading a specific Qwen2 model with tp; not the same underlying defect.", "right": "issue:43575"}], "summary": "These items are mostly unrelated Transformers bugs and regressions that only share broad subsystem language. 
None of the soft pairs look like the same underlying issue or a mergeable duplicate PR-equivalent change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43867|issue:45357", "issue:43010|issue:43976", "issue:44360|issue:45440", "issue:44938|issue:45381", "issue:43618|issue:44077", "issue:41669|issue:44554", "issue:43644|issue:44938", "issue:43867|issue:44484", "issue:38617|issue:43992", "issue:36296|issue:43575"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7012, "estimated_input_tokens": 3378, "item_count": 18, "node_count": 18, "serialized_chars": 13510, "soft_pair_count": 11}, "cached_at": "2026-04-14T21:22:16Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "5e2ac4d6ebb3a272fb2af15901968f12548b35bc442854930732f9477a1c2c2e", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:28282", "issue:33453", "issue:42548", "issue:42890", "issue:43010", "issue:43232", "issue:43502", "issue:43618", "issue:43723", "issue:44062", "issue:44075", "issue:44112", "issue:44351", "issue:44556", "issue:44857", "issue:44964", "issue:44977", "issue:45341"], "result": {"analyst_result": {"best_issue_reason": "43723 best captures the shared loading-regression theme and could serve as the umbrella issue for the closest match in this set.", "best_pr_reason": null, "canonical_issue_reason": "Issue 43723 is the broadest representative of the only clearly related theme here: tokenizer/model loading regressions in v5. 
It is more general than the narrower checkpoint-upgrade report in 44556.", "canonical_pr_reason": null, "confidence": 0.54, "soft_edge_verdicts": [{"accept": false, "left": "issue:42890", "reason": "Both mention model-related failures, but one is a SAM HQ test flake and the other is a Phi-4 multimodal loading regression; different bugs and code paths.", "right": "issue:44964"}, {"accept": false, "left": "issue:42890", "reason": "Both are CI/test failures, but they affect different tests and subsystems; this looks like generic flakiness rather than one underlying bug.", "right": "issue:44112"}, {"accept": false, "left": "issue:28282", "reason": "One is an AutoModel/PyTorch import error and the other is a tokenizer-loading regression; too different to be the same bug.", "right": "issue:33453"}, {"accept": false, "left": "issue:43618", "reason": "CLIPOutput attentions missing and an AMP/CUDA loss crash are unrelated model behaviors.", "right": "issue:44857"}, {"accept": false, "left": "issue:43618", "reason": "Missing CLIP attentions and Qwen3.5 flash-attention generation problems are different failures in different models/code paths.", "right": "issue:44977"}, {"accept": false, "left": "issue:43502", "reason": "Local-files-only network leakage and AddedToken argument duplication are unrelated loading issues.", "right": "issue:44062"}, {"accept": false, "left": "issue:43010", "reason": "Cache/layer no_grad decoration and SGD optimizer args not being used are separate implementation bugs.", "right": "issue:44075"}, {"accept": false, "left": "issue:42890", "reason": "A specific SAM HQ test flake and a general testing_utils bug are not the same underlying issue.", "right": "issue:45341"}, {"accept": true, "left": "issue:43723", "reason": "Both describe the same v5 loading regression family: tokenizer/model checkpoint reload failures after upgrading to v5, likely sharing the same loading code path.", "right": "issue:44556"}, {"accept": false, "left": "issue:43010", 
"reason": "These are both generation/caching related only at a high level; one is about no_grad on cache updates, the other about sync_gpus behavior in generation.", "right": "issue:43232"}, {"accept": false, "left": "issue:42548", "reason": "Both are import errors from transformers, but they reference different missing symbols and likely different export regressions.", "right": "issue:44351"}], "summary": "This cluster is mostly heterogeneous; only the tokenizer/checkpoint-loading v5 reports look plausibly duplicate. The rest are distinct import, CI, cache, generation, or model-specific bugs."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:42890|issue:44964", "issue:42890|issue:44112", "issue:28282|issue:33453", "issue:43618|issue:44857", "issue:43618|issue:44977", "issue:43502|issue:44062", "issue:43010|issue:44075", "issue:42890|issue:45341", "issue:43723|issue:44556", "issue:43010|issue:43232", "issue:42548|issue:44351"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6894, "estimated_input_tokens": 3319, "item_count": 18, "node_count": 18, "serialized_chars": 13276, "soft_pair_count": 10}, "cached_at": "2026-04-14T21:22:38Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "fd371777569ca9342ade03d7f977a65b220fc07bc2c3e6f442c8eba5e4e33287", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:30990", "issue:31795", "issue:36296", "issue:43010", "issue:43065", "issue:43502", "issue:44075", "issue:44222", "issue:44315", "issue:44355", "issue:44360", "issue:44368", "issue:44387", "issue:44485", "issue:44556", "issue:44749", "issue:45125", "issue:45375"], "result": 
{"analyst_result": {"best_issue_reason": "issue:44387 is the strongest standalone bug report: it describes a concrete regression (higher CUDA reserved memory / OOM under int4 quantization), has clear scope, and attracted substantial discussion.", "best_pr_reason": null, "canonical_issue_reason": "No single canonical issue fits the cluster; the items span distinct bugs, warnings, config problems, and docs topics rather than one underlying defect.", "canonical_pr_reason": null, "confidence": 0.91, "soft_edge_verdicts": [{"accept": false, "left": "issue:44387", "reason": "Both involve version/regression complaints, but one is int4 memory/OOM and the other is checkpoint reload failure; different failure modes and fixes.", "right": "issue:44556"}, {"accept": false, "left": "issue:44222", "reason": "FP8 save_pretrained for MoE and missing _tp_plan for tensor parallelism are separate model-handling bugs.", "right": "issue:45125"}, {"accept": false, "left": "issue:43010", "reason": "`no_grad` on cache update and a dummy `Conv2d` in Sam3PixelDecoder are unrelated implementation issues.", "right": "issue:43065"}, {"accept": false, "left": "issue:45125", "reason": "Both mention Qwen3_5Moe variants, but one is tensor parallelism metadata and the other is a missing config field dropped by strict validation.", "right": "issue:45375"}, {"accept": false, "left": "issue:44355", "reason": "Compiled Python file errors and post-upgrade data filtering slowdown are not the same bug.", "right": "issue:44749"}, {"accept": false, "left": "issue:30990", "reason": "Model loading hangs versus forward-method documentation confusion are clearly different issues.", "right": "issue:31795"}, {"accept": false, "left": "issue:43502", "reason": "Offline file-loading API calls and GLM-5 RoPE implementation are unrelated.", "right": "issue:44485"}, {"accept": false, "left": "issue:43010", "reason": "A cache/layer update decorator bug is not the same as a model warning about `tie_word_embeddings`.", 
"right": "issue:44368"}, {"accept": false, "left": "issue:44315", "reason": "Liger Kernel application during `model_init` and a DSA indexer ReLU discussion are different code paths and symptoms.", "right": "issue:44360"}, {"accept": false, "left": "issue:36296", "reason": "Tensor-parallel training bug and SGD args not being used are separate optimizer/model-parallel concerns.", "right": "issue:44075"}], "summary": "This cluster is heterogeneous: the soft-linked pairs are mostly unrelated issues sharing only broad thematic similarity. I found no true duplicates or mergeable PR-equivalents."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44387|issue:44556", "issue:44222|issue:45125", "issue:43010|issue:43065", "issue:45125|issue:45375", "issue:44355|issue:44749", "issue:30990|issue:31795", "issue:43502|issue:44485", "issue:43010|issue:44368", "issue:44315|issue:44360", "issue:36296|issue:44075"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6950, "estimated_input_tokens": 3347, "item_count": 18, "node_count": 18, "serialized_chars": 13386, "soft_pair_count": 11}, "cached_at": "2026-04-14T21:23:15Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "06eff93f023b2084752c3558afee6956e6307227a37cecc23d011f66bfdb7227", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:33453", "issue:34689", "issue:36296", "issue:38175", "issue:42831", "issue:42890", "issue:43232", "issue:43299", "issue:43502", "issue:43874", "issue:44222", "issue:44230", "issue:44355", "issue:44373", "issue:44556", "issue:44655", "issue:44961", "issue:45071"], "result": {"analyst_result": {"best_issue_reason": "No 
global best issue stands out as a true cluster representative; 43502 is the broadest behavior report, but it is still unrelated to most of the other issues.", "best_pr_reason": null, "canonical_issue_reason": "No single canonical issue: the set is heterogeneous, with unrelated topics spanning tokenizer/model loading, FP8 behavior, tests, docs, and generation internals.", "canonical_pr_reason": null, "confidence": 0.94, "soft_edge_verdicts": [{"accept": false, "left": "issue:42890", "reason": "One is about missing set_seed in a model integration test; the other is a wrong docstring for position_ids. Different problem classes.", "right": "issue:44373"}, {"accept": false, "left": "issue:38175", "reason": "Unexpected zero probabilities in a specific model is unrelated to unwanted API requests when local_files_only=True.", "right": "issue:43502"}, {"accept": false, "left": "issue:43502", "reason": "Local-files-only network requests and the unrelated 'racoon' issue do not describe the same bug.", "right": "issue:44961"}, {"accept": false, "left": "issue:43299", "reason": "Both mention loading regressions, but they affect different versions/models and different failure modes; not the same underlying defect.", "right": "issue:44556"}, {"accept": false, "left": "issue:44355", "reason": "Compiled Python file errors are unrelated to saving Pipeline objects with save_pretrained().", "right": "issue:44655"}, {"accept": false, "left": "issue:44355", "reason": "A compiled-file runtime error is not the same as PretrainedConfig type-checking breaking in v5.4.0.", "right": "issue:45071"}, {"accept": false, "left": "issue:42831", "reason": "FP8 accuracy issues and a missing image-processor method causing AttributeError are distinct bugs.", "right": "issue:43874"}, {"accept": false, "left": "issue:42890", "reason": "Test seeding gaps are unrelated to the generation kwarg update after sync_gpus.", "right": "issue:43232"}, {"accept": false, "left": "issue:44222", "reason": "Both touch 
FP8/MoE, but one is about save_pretrained and the other about inference support; not mergeable as one fix.", "right": "issue:44230"}, {"accept": false, "left": "issue:33453", "reason": "Both are loading regressions, but they involve different models/components and different root causes.", "right": "issue:34689"}, {"accept": false, "left": "issue:33453", "reason": "Tokenizer loading regression and tensor-parallel training bug are unrelated.", "right": "issue:36296"}], "summary": "The soft pairs are broadly related only by high-level themes like loading regressions, FP8, or test/runtime failures, but none appear to describe the same concrete bug or change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:42890|issue:44373", "issue:38175|issue:43502", "issue:43502|issue:44961", "issue:43299|issue:44556", "issue:44355|issue:44655", "issue:44355|issue:45071", "issue:42831|issue:43874", "issue:42890|issue:43232", "issue:44222|issue:44230", "issue:33453|issue:34689", "issue:33453|issue:36296"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6802, "estimated_input_tokens": 3273, "item_count": 17, "node_count": 17, "serialized_chars": 13091, "soft_pair_count": 12}, "cached_at": "2026-04-14T21:23:43Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "81e8fdce64a6281fbf326d980f549a3d101c7d7bdd2659ff94c9b1f9cf5d006e", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:31795", "issue:33357", "issue:33453", "issue:38617", "issue:41628", "issue:42548", "issue:43576", "issue:43673", "issue:43824", "issue:43827", "issue:44351", "issue:44485", "issue:44492", "issue:44908", "issue:44938", 
"issue:44961", "issue:45071"], "result": {"analyst_result": {"best_issue_reason": "43576 is the best representative only in the sense that it is the most prominent, user-facing regression report here; however, it is not actually a duplicate of the others.", "best_pr_reason": null, "canonical_issue_reason": "No single true canonical issue stands out because the items are mostly unrelated. If one must be picked as the cluster representative, 43576 is the broadest and most substantive v5 regression report with the most discussion/inbound references.", "canonical_pr_reason": null, "confidence": 0.88, "soft_edge_verdicts": [{"accept": false, "left": "issue:43827", "reason": "Docs referencing removed pipeline APIs and an unrelated issue titled \"racoon\" are clearly not the same bug.", "right": "issue:44961"}, {"accept": false, "left": "issue:43673", "reason": "A chunked_prefill cache regression and a cache-strategy typo are different problems and code paths.", "right": "issue:44492"}, {"accept": false, "left": "issue:43576", "reason": "Broken transformers env command vs missing Qwen2_5_VL export are unrelated failures.", "right": "issue:43824"}, {"accept": false, "left": "issue:43576", "reason": "Different regressions: CLI/env command breakage versus type-checking breakage in PretrainedConfig.", "right": "issue:45071"}, {"accept": false, "left": "issue:38617", "reason": "ImportError for layer_type_validation is unrelated to the transformers env command being broken.", "right": "issue:43576"}, {"accept": false, "left": "issue:33357", "reason": "MacOS bus error with a community CLIP model and tokenizer loading regression are different underlying issues.", "right": "issue:33453"}, {"accept": false, "left": "issue:41628", "reason": "Missing AutoImageProcessor and missing PreTrainedModel are distinct import/export problems.", "right": "issue:42548"}, {"accept": false, "left": "issue:31795", "reason": "Documentation confusion in model.forward is not the same as an ImportError 
from configuration_utils.", "right": "issue:38617"}, {"accept": false, "left": "issue:43576", "reason": "Both are import-adjacent, but they concern different symbols and likely different breakpoints.", "right": "issue:44351"}, {"accept": false, "left": "issue:44908", "reason": "Scheduler kwargs being ignored is unrelated to the unrelated \"racoon\" issue.", "right": "issue:44961"}, {"accept": false, "left": "issue:43576", "reason": "A broken env command and a Python 3.14 load failure are different compatibility/path issues.", "right": "issue:44938"}, {"accept": false, "left": "issue:44485", "reason": "GLM-5 RoPE implementation discussion and an unrelated \"racoon\" issue do not match.", "right": "issue:44961"}], "summary": "These items do not look like one duplicate cluster; they span unrelated documentation, import, tokenizer, scheduler, cache, and runtime/compatibility problems. I would not merge any of the soft pairs."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43827|issue:44961", "issue:43673|issue:44492", "issue:43576|issue:43824", "issue:43576|issue:45071", "issue:38617|issue:43576", "issue:33357|issue:33453", "issue:41628|issue:42548", "issue:31795|issue:38617", "issue:43576|issue:44351", "issue:44908|issue:44961", "issue:43576|issue:44938", "issue:44485|issue:44961"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6492, "estimated_input_tokens": 3118, "item_count": 17, "node_count": 17, "serialized_chars": 12471, "soft_pair_count": 10}, "cached_at": "2026-04-14T21:24:14Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": 
"1ebec4095f64e9220d74973fd4476282921d80a910c589fbbcccc869902459d9", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:36296", "issue:41628", "issue:41669", "issue:43012", "issue:43526", "issue:43576", "issue:43673", "issue:43901", "issue:44261", "issue:44273", "issue:44355", "issue:44360", "issue:44556", "issue:44623", "issue:44961", "issue:45003", "issue:45230"], "result": {"analyst_result": {"best_issue_reason": "Issue 44261 is the best global issue candidate because it is open, well-scoped, and has active discussion; the rest are either unrelated, overly generic, or weaker as standalone reports.", "best_pr_reason": null, "canonical_issue_reason": "Issue 44261 is the most substantive open report in the set, with the highest discussion activity and concrete technical detail, so it is the best anchor if one must be chosen.", "canonical_pr_reason": null, "confidence": 0.94, "soft_edge_verdicts": [{"accept": false, "left": "issue:44273", "reason": "Both touch loading/import behavior, but one is a lazy-loading bug and the other is an unsafe sys.modules access issue in modeling_utils; different failure modes and fixes.", "right": "issue:45003"}, {"accept": false, "left": "issue:36296", "reason": "Tensor-parallel training bug vs TextClassificationPipeline docs/behavior mismatch; unrelated subsystems and code paths.", "right": "issue:43901"}, {"accept": false, "left": "issue:41628", "reason": "AutoImageProcessor import failure and errors running compiled Python files are different symptoms with no clear shared root cause.", "right": "issue:44355"}, {"accept": false, "left": "issue:36296", "reason": "A tensor-parallel training bug is not the same as a bfloat16 float32 precision warning; too generic to merge.", "right": "issue:43012"}, {"accept": false, "left": "issue:36296", "reason": "Training parallelism issue vs BeitImageProcessorFast reduce_labels bug; separate components and fixes.", "right": "issue:43526"}, {"accept": false, "left": "issue:44261", 
"reason": "MLA q_a_layernorm RMS epsilon precision issue is unrelated to the vague 'racoon' report.", "right": "issue:44961"}, {"accept": false, "left": "issue:44261", "reason": "Missing config.rms_norm_eps in MLA q_a_layernorm and a missing ReLU in DSA indexer are different concrete bugs.", "right": "issue:44360"}, {"accept": false, "left": "issue:43576", "reason": "Both mention v5 regressions, but one is a broken CLI env command and the other is checkpoint reload incompatibility; not the same bug.", "right": "issue:44556"}, {"accept": false, "left": "issue:44623", "reason": "Processor.save_pretrained missing files is specific; 'Bug report' is too generic and provides no evidence of the same underlying problem.", "right": "issue:45230"}, {"accept": false, "left": "issue:41669", "reason": "Import-star cleanup/performance issue and GenerationMixin cache missing in chunked_prefill are distinct problems despite both involving model internals.", "right": "issue:43673"}], "summary": "This cluster is a grab bag of unrelated issue reports rather than a duplicate set. No soft pair looks like the same underlying bug/change, so all should be rejected. 
If forced to pick an anchor, issue 44261 is the strongest standalone report by activity and specificity."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44273|issue:45003", "issue:36296|issue:43901", "issue:41628|issue:44355", "issue:36296|issue:43012", "issue:36296|issue:43526", "issue:44261|issue:44961", "issue:44261|issue:44360", "issue:43576|issue:44556", "issue:44623|issue:45230", "issue:41669|issue:43673"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7162, "estimated_input_tokens": 3453, "item_count": 18, "node_count": 18, "serialized_chars": 13809, "soft_pair_count": 12}, "cached_at": "2026-04-14T21:24:55Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "271e53bd5bd802ca8e10a3e9680fad25e58304d7392c7bffd7fef9a50cb0886d", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:42371", "issue:42890", "issue:43352", "issue:43408", "issue:43576", "issue:43976", "issue:44038", "issue:44246", "issue:44273", "issue:44315", "issue:44393", "issue:44492", "issue:44556", "issue:44704", "issue:44908", "issue:45071", "issue:45092", "issue:45125"], "result": {"analyst_result": {"best_issue_reason": "45071 is the best single issue to stand in for this cluster because it is a core v5 compatibility failure rather than a narrow model-specific symptom.", "best_pr_reason": null, "canonical_issue_reason": "Issue 45071 is the broadest core-regression report in the set: a v5 type-checking break in a central API, which makes it the least narrow representative among mostly model- or feature-specific bugs.", "canonical_pr_reason": null, "confidence": 0.31, "soft_edge_verdicts": [{"accept": false, 
"left": "issue:44393", "reason": "Different bugs: Qwen3-VL 2D bounding-box hallucination/error vs PretrainedConfig type checking regression.", "right": "issue:45071"}, {"accept": false, "left": "issue:44038", "reason": "Both involve Qwen3 MOE models, but one is a Qwen3-VL-Moe bug and the other is missing tensor-parallel plan support; different code paths and fixes.", "right": "issue:45125"}, {"accept": false, "left": "issue:44556", "reason": "Both are v5 upgrade compatibility reports, but one is checkpoint reload failure and the other is remote-code/meta-init incompatibility; not the same underlying bug.", "right": "issue:45092"}, {"accept": false, "left": "issue:42371", "reason": "TF32 API control vs a typo in cache strategy text; unrelated issues.", "right": "issue:44492"}, {"accept": false, "left": "issue:42890", "reason": "Flaky integration tests from missing set_seed calls are unrelated to inverse_sqrt scheduler kwargs propagation.", "right": "issue:44908"}, {"accept": false, "left": "issue:44246", "reason": "Import slowness and lazy-loading failure are adjacent areas, but they are not the same concrete bug.", "right": "issue:44273"}, {"accept": false, "left": "issue:44704", "reason": "AutoProcessor kwargs forwarding to cached_file and scheduler lr kwargs handling are distinct subsystems and fixes.", "right": "issue:44908"}, {"accept": false, "left": "issue:43408", "reason": "Model-type warning for sam3_video/tracker vs Liger Kernel not applied with model_init; unrelated code paths.", "right": "issue:44315"}, {"accept": false, "left": "issue:43576", "reason": "Broken `transformers env` command is unrelated to scheduler kwarg handling.", "right": "issue:44908"}, {"accept": false, "left": "issue:43976", "reason": "Python version compatibility failure vs Qwen3-VL bounding-box output bug; different problems.", "right": "issue:44393"}, {"accept": false, "left": "issue:43352", "reason": "Flash Attention 2.0 unsupported for NemotronH vs Qwen3-VL bbox 
hallucination/error; unrelated.", "right": "issue:44393"}, {"accept": false, "left": "issue:44908", "reason": "Scheduler kwargs are ignored in one issue, while the other is a PretrainedConfig type-checking regression; same release era but not same bug.", "right": "issue:45071"}], "summary": "These items are largely heterogeneous and only loosely linked by Transformers v5 / model-compatibility wording. The soft pairs are title-similar but describe different bugs, so none should be merged. The broadest representative issue is the v5 PretrainedConfig regression, but overall cluster coherence is weak."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44393|issue:45071", "issue:44038|issue:45125", "issue:44556|issue:45092", "issue:42371|issue:44492", "issue:42890|issue:44908", "issue:44246|issue:44273", "issue:44704|issue:44908", "issue:43408|issue:44315", "issue:43576|issue:44908", "issue:43976|issue:44393", "issue:43352|issue:44393", "issue:44908|issue:45071"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6736, "estimated_input_tokens": 3240, "item_count": 17, "node_count": 17, "serialized_chars": 12957, "soft_pair_count": 11}, "cached_at": "2026-04-14T21:25:21Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "3df1b5da951a2bd10a7ed9dec3c45df552e9794e35bc5e05864e12ba6d1401e9", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:31795", "issue:33453", "issue:41669", "issue:43704", "issue:43827", "issue:44077", "issue:44295", "issue:44297", "issue:44315", "issue:44351", "issue:44492", "issue:44829", "issue:44861", "issue:44908", "issue:44945", "issue:45230", "issue:45341"], 
"result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "issue:41669", "reason": "Both mention imports, but one is about removing import * for performance and file explosion, while the other is a missing HybridCache export/import error; different code paths and fixes.", "right": "issue:44351"}, {"accept": false, "left": "issue:44315", "reason": "Liger Kernel not applied with model_init and flash_attention_3 causing degenerate training are distinct model-training bugs with different triggers and remedies.", "right": "issue:44829"}, {"accept": false, "left": "issue:44297", "reason": "tokenizer.save_pretrained writing the wrong tokenizer_class is a specific serialization bug; 'Bug report' is too vague and does not identify the same issue.", "right": "issue:45230"}, {"accept": false, "left": "issue:41669", "reason": "import * usage causing slowdown/file explosion is unrelated to a small testing_utils bug.", "right": "issue:45341"}, {"accept": false, "left": "issue:44295", "reason": "position_ids buffer read errors are a concrete tensor/buffer handling bug, not the same as an unspecified generic bug report.", "right": "issue:45230"}, {"accept": false, "left": "issue:44492", "reason": "A typo in cache strategies is unrelated to incorrect LLM outputs under pipeline parallelism; these are different subsystems and failure modes.", "right": "issue:44945"}, {"accept": false, "left": "issue:41669", "reason": "Import-star performance regression and a cache-strategy typo are not the same underlying defect.", "right": "issue:44492"}, {"accept": false, "left": "issue:43827", "reason": "Docs still referencing pipeline() after removals is a documentation cleanup issue; inverse_sqrt ignoring lr_scheduler_kwargs is a runtime scheduler bug.", "right": "issue:44908"}, {"accept": false, "left": "issue:44077", "reason": "patchtsmixer 
post_init allowance and _get_tied_weight_keys crashing on list input are unrelated model/config bugs.", "right": "issue:44861"}, {"accept": false, "left": "issue:41669", "reason": "Import * slowdown is unrelated to Qwen3ForCausalLM VRAM leakage in multi-dataloader threads.", "right": "issue:43704"}, {"accept": false, "left": "issue:31795", "reason": "Confusing forward-argument docs and a tokenizer loading regression do not point to the same bug or fix.", "right": "issue:33453"}], "summary": "This is a noisy, false-positive cluster: the issues span unrelated areas (docs, tokenizer loading, cache internals, schedulers, Liger/flash-attn, VRAM leaks, and pipeline parallelism) and do not share a concrete underlying bug or change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:41669|issue:44351", "issue:44315|issue:44829", "issue:44297|issue:45230", "issue:41669|issue:45341", "issue:44295|issue:45230", "issue:44492|issue:44945", "issue:41669|issue:44492", "issue:43827|issue:44908", "issue:44077|issue:44861", "issue:41669|issue:43704", "issue:31795|issue:33453"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7266, "estimated_input_tokens": 3505, "item_count": 18, "node_count": 18, "serialized_chars": 14019, "soft_pair_count": 13}, "cached_at": "2026-04-14T21:25:46Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "dcfd67491e091dcb39740d05d09c3731e49efb3c06f100326c7fbe4fe10a5e7c", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:31795", "issue:34689", "issue:37428", "issue:43519", "issue:43576", "issue:43723", "issue:43937", "issue:44273", "issue:44315", "issue:44393", 
"issue:44485", "issue:44655", "issue:44861", "issue:44908", "issue:45071", "issue:45125", "issue:45230", "issue:45310"], "result": {"analyst_result": {"best_issue_reason": "issue:45125 is the best representative issue for the cluster\u2019s only clear subtopic because it names the concrete code-path problem and is easier to deduplicate against than the generic symptom report.", "best_pr_reason": null, "canonical_issue_reason": "issue:45125 is the most concrete technical report in the only plausible duplicate pair: it states the likely root cause (`missing _tp_plan` for tensor parallelism) rather than the broader user-facing load error.", "canonical_pr_reason": null, "confidence": 0.66, "soft_edge_verdicts": [{"accept": false, "left": "issue:43723", "reason": "Tokenizer loading in v5 and Qwen3-VL 2D bbox output are different subsystems and failure modes.", "right": "issue:44393"}, {"accept": false, "left": "issue:44655", "reason": "Pipeline save_pretrained support and tied-weight-key AttributeError are unrelated code paths.", "right": "issue:44861"}, {"accept": false, "left": "issue:43576", "reason": "Broken `transformers env` CLI and lazy-loading behavior are not the same bug.", "right": "issue:44273"}, {"accept": false, "left": "issue:43937", "reason": "GLM-5 generation config validation and Qwen3-VL bbox output errors are distinct model-specific issues.", "right": "issue:44393"}, {"accept": true, "left": "issue:45125", "reason": "Both describe the same Qwen3.5 MoE loading/from_pretrained failure, with 45125 giving the concrete root cause (`_tp_plan` missing).", "right": "issue:45310"}, {"accept": false, "left": "issue:44273", "reason": "Lazy loading and inverse_sqrt scheduler kwargs handling are unrelated.", "right": "issue:44908"}, {"accept": false, "left": "issue:45230", "reason": "A generic bug report is too vague to be confidently the same underlying issue as the Qwen3.5 MoE load error.", "right": "issue:45310"}, {"accept": false, "left": "issue:43519", 
"reason": "Qwen3VL timestamp calculation and GLM-5 RoPE implementation are different model behaviors.", "right": "issue:44485"}, {"accept": false, "left": "issue:43519", "reason": "Timestamp calculation in a processor is unrelated to scheduler kwargs handling.", "right": "issue:44908"}, {"accept": false, "left": "issue:31795", "reason": "Model.forward docs confusion and Llama 3.2 Vision model loading breakage are not the same bug.", "right": "issue:34689"}, {"accept": false, "left": "issue:44315", "reason": "Liger Kernel application with `model_init` and Qwen3-VL bbox output are different problems.", "right": "issue:44393"}, {"accept": false, "left": "issue:31795", "reason": "Documentation clarity in `model.forward` does not match an import failure in flash-attention utilities.", "right": "issue:37428"}, {"accept": false, "left": "issue:44273", "reason": "Lazy loading behavior and `PretrainedConfig` type checking are unrelated regressions.", "right": "issue:45071"}], "summary": "The items are mostly unrelated bugs and docs issues. 
The only likely duplicate pair is the Qwen3.5 MoE loading/from_pretrained failure tied to missing `_tp_plan`; the rest do not look like the same underlying problem."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43723|issue:44393", "issue:44655|issue:44861", "issue:43576|issue:44273", "issue:43937|issue:44393", "issue:45125|issue:45310", "issue:44273|issue:44908", "issue:45230|issue:45310", "issue:43519|issue:44485", "issue:43519|issue:44908", "issue:31795|issue:34689", "issue:44315|issue:44393", "issue:31795|issue:37428", "issue:44273|issue:45071"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7416, "estimated_input_tokens": 3580, "item_count": 18, "node_count": 18, "serialized_chars": 14317, "soft_pair_count": 14}, "cached_at": "2026-04-14T21:26:11Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "afc99282bc677266a7f7187ca5442c4cab3c50dd251f5437721d4e029bffc031", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:42371", "issue:42890", "issue:42913", "issue:43066", "issue:43502", "issue:43519", "issue:43525", "issue:43618", "issue:43704", "issue:44485", "issue:44492", "issue:44556", "issue:44623", "issue:44861", "issue:44908", "issue:44998", "issue:45290", "issue:45341"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "issue:43704", "reason": "VRAM leakage in multi-threaded dataloading is unrelated to a GLM-5 RoPE implementation discussion.", "right": "issue:44485"}, {"accept": false, "left": "issue:42371", 
"reason": "TF32 configuration guidance and a VRAM leak in Qwen3ForCausalLM are different bug categories with no shared code path.", "right": "issue:43704"}, {"accept": false, "left": "issue:43502", "reason": "Unexpected network calls with local_files_only and a Qwen3VL timestamp bug are separate processor/model issues.", "right": "issue:43519"}, {"accept": false, "left": "issue:43519", "reason": "Wrong timestamp calculation and missing processor save_pretrained files affect different processor behaviors.", "right": "issue:44623"}, {"accept": false, "left": "issue:43519", "reason": "A timestamp computation bug in Qwen3VL is not the same as a VRAM leak in Qwen3ForCausalLM.", "right": "issue:43704"}, {"accept": false, "left": "issue:42913", "reason": "Both concern tokenizer v5, but one reports a general v4-to-v5 behavior change while the other is specifically a wrong decoder type; not the same concrete bug.", "right": "issue:43066"}, {"accept": false, "left": "issue:42890", "reason": "Test flakiness from missing set_seed calls is unrelated to a runtime VRAM leak.", "right": "issue:43704"}, {"accept": false, "left": "issue:44908", "reason": "Scheduler kwargs handling and an unrelated placeholder/junk issue do not match the same defect.", "right": "issue:44998"}, {"accept": false, "left": "issue:44861", "reason": "A tied-weights key AttributeError and a chat-template crash with tool-call messages are different code paths.", "right": "issue:45290"}, {"accept": false, "left": "issue:44998", "reason": "The placeholder 'Unemployment' issue is not the same as a testing_utils bug report.", "right": "issue:45341"}, {"accept": false, "left": "issue:44492", "reason": "A cache-strategy typo and an unrelated placeholder issue are not duplicates.", "right": "issue:44998"}, {"accept": false, "left": "issue:43618", "reason": "CLIPOutput attentions assignment regression is unrelated to the placeholder 'Unemployment' issue.", "right": "issue:44998"}, {"accept": false, "left": 
"issue:44556", "reason": "Checkpoint reload incompatibility across versions is unrelated to the placeholder issue.", "right": "issue:44998"}, {"accept": false, "left": "issue:43525", "reason": "Missing pad_token_id on Llama4Config and tied-weight key handling are distinct attribute errors in different components.", "right": "issue:44861"}], "summary": "This cluster is a grab-bag of unrelated bug reports. The soft-similarity pairs mostly share broad vocabulary or subsystems, but they do not describe the same concrete defect, so none should be merged as duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43704|issue:44485", "issue:42371|issue:43704", "issue:43502|issue:43519", "issue:43519|issue:44623", "issue:43519|issue:43704", "issue:42913|issue:43066", "issue:42890|issue:43704", "issue:44908|issue:44998", "issue:44861|issue:45290", "issue:44998|issue:45341", "issue:44492|issue:44998", "issue:43618|issue:44998", "issue:44556|issue:44998", "issue:43525|issue:44861"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 4986, "estimated_input_tokens": 2365, "item_count": 12, "node_count": 12, "serialized_chars": 9457, "soft_pair_count": 9}, "cached_at": "2026-04-14T21:26:35Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "6f6be992d469630a392669626992c4aec443d610612b9d27d2cf9c808db679e9", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:31515", "issue:31795", "issue:33453", "issue:36296", "issue:36683", "issue:38617", "issue:43298", "issue:43572", "issue:44230", "issue:44623", "issue:44861", "issue:45310"], "result": {"analyst_result": {"best_issue_reason": "If one issue 
must represent this set, issue:45310 is the best anchor because it is specific, reproducible, and clearly about a real runtime failure rather than docs or a generic feature request.", "best_pr_reason": null, "canonical_issue_reason": "issue:45310 is the most concrete and actionable bug report in the set: it names a specific from_pretrained failure, a version boundary (transformers>=5.4.0), and a concrete model family (Qwen3.5 MoE).", "canonical_pr_reason": null, "confidence": 0.71, "soft_edge_verdicts": [{"accept": false, "left": "issue:43572", "reason": "Both are config/loading-related, but one is a missing StableLmConfig field after a version update and the other is an AttributeError in tied-weight key handling. Different code paths and different failure modes.", "right": "issue:44861"}, {"accept": false, "left": "issue:31515", "reason": "One reports slow checkpoint loading in from_pretrained; the other is an ImportError for layer_type_validation. Not the same bug.", "right": "issue:38617"}, {"accept": false, "left": "issue:44230", "reason": "FP8 inference support for Qwen models is a feature/support request, while processor.save_pretrained missing files is a packaging/save bug. Different underlying changes.", "right": "issue:44623"}, {"accept": false, "left": "issue:31515", "reason": "Checkpoint loading performance and tensor-parallel training are unrelated problems.", "right": "issue:36296"}, {"accept": false, "left": "issue:31515", "reason": "Slow checkpoint loading is not the same as confusing forward() documentation for input_ids/past_key_values.", "right": "issue:31795"}, {"accept": false, "left": "issue:31515", "reason": "Both mention loading, but one is a performance complaint and the other is a tokenizer-loading regression. 
Too broad to treat as the same concrete bug.", "right": "issue:33453"}, {"accept": false, "left": "issue:44230", "reason": "Both mention Qwen/MoE loading, but one asks for FP8 support and the other reports a from_pretrained error on a specific version. Not the same fixable code-path problem.", "right": "issue:45310"}, {"accept": false, "left": "issue:31515", "reason": "Checkpoint loading slowness and Gemma3Config missing vocab_size are distinct issues.", "right": "issue:36683"}, {"accept": false, "left": "issue:43298", "reason": "Continuous batching not supporting audio models and Qwen3.5 FP8 inference support are different feature gaps in different subsystems.", "right": "issue:44230"}], "summary": "These issues are mostly only loosely related by broad model-loading/configuration terminology. None of the soft pairs looks like the same underlying bug or change, so I would not merge any of them as duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43572|issue:44861", "issue:31515|issue:38617", "issue:44230|issue:44623", "issue:31515|issue:36296", "issue:31515|issue:31795", "issue:31515|issue:33453", "issue:44230|issue:45310", "issue:31515|issue:36683", "issue:43298|issue:44230"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2484, "estimated_input_tokens": 1114, "item_count": 4, "node_count": 4, "serialized_chars": 4454, "soft_pair_count": 6}, "cached_at": "2026-04-14T21:27:28Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "70820322795c9013a91e4a95c24e07c6a005727afa435f8808d3d62379217a2a", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:27088", "issue:43834", 
"issue:44365", "issue:44370"], "result": {"analyst_result": {"best_issue_reason": "#27088 is the clearest and most specific issue to keep as the representative duplicate target for this cluster.", "best_pr_reason": null, "canonical_issue_reason": "issue #27088 is the only one with a concrete language in the title (Turkish), so it best anchors the cluster as a real docs-translation request rather than a placeholder template.", "canonical_pr_reason": null, "confidence": 0.78, "soft_edge_verdicts": [{"accept": true, "left": "issue:27088", "reason": "Same underlying docs-translation issue pattern; body similarity is high and the title difference appears to be templated language labeling rather than a distinct bug.", "right": "issue:44370"}, {"accept": true, "left": "issue:27088", "reason": "Near-identical translation request with only template-level language placeholders differing; no evidence of a distinct underlying issue.", "right": "issue:44365"}, {"accept": true, "left": "issue:27088", "reason": "Same i18n docs-translation task family; highly similar body text suggests a duplicate or template variant rather than a separate issue.", "right": "issue:43834"}, {"accept": true, "left": "issue:43834", "reason": "These two look like the same templated docs-translation issue with only minor placeholder/body variation.", "right": "issue:44370"}, {"accept": true, "left": "issue:43834", "reason": "Essentially the same templated translation request; body overlap is near-total and no distinct target is visible.", "right": "issue:44365"}, {"accept": true, "left": "issue:44365", "reason": "Near-duplicate template issues with almost identical bodies and no distinguishing issue target or code-path difference.", "right": "issue:44370"}], "summary": "This cluster is a set of highly similar documentation-translation issues, mostly templated duplicates of the same i18n request pattern. 
The Turkish-specific issue is the most concrete representative, while the others look like near-identical template instances."}, "error_kind": null, "error_message": null, "evaluator_result": {"accept": true, "feedback": "The summary is grounded in the packet: one issue has a concrete language in the title, while the others are clear template placeholders, and the pairwise Jaccard scores are very high across the cluster. The duplicate/near-duplicate judgments are conservative enough for this evidence."}, "evaluator_used": true, "retried": false}, "soft_pairs": ["issue:27088|issue:44370", "issue:27088|issue:44365", "issue:27088|issue:43834", "issue:43834|issue:44370", "issue:43834|issue:44365", "issue:44365|issue:44370"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 7316, "estimated_input_tokens": 3530, "item_count": 13, "node_count": 13, "serialized_chars": 14120, "soft_pair_count": 24}, "cached_at": "2026-04-14T21:27:48Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "e2dd0be849b37e749324e16c54f2a8c06618f37b733551ce3e83395c64fa38f9", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:36817", "issue:37015", "issue:41211", "issue:42503", "issue:42738", "issue:43671", "issue:43837", "issue:43909", "issue:44121", "issue:44205", "issue:45027", "issue:45177", "issue:45322"], "result": {"analyst_result": {"best_issue_reason": "No issue in this set is a safe duplicate canonical; they are only loosely related by being model-addition requests.", "best_pr_reason": null, "canonical_issue_reason": "No single canonical issue fits: these requests target different models and capabilities, so they are not the same underlying bug/change.", "canonical_pr_reason": null, 
"confidence": 0.98, "soft_edge_verdicts": [{"accept": false, "left": "issue:43909", "reason": "Both are model-add requests, but LFM2.5 Audio 1.5B and SAM3-LiteText are different model families and tasks.", "right": "issue:44205"}, {"accept": false, "left": "issue:43671", "reason": "Qwen3-TTS and Qwen3-ASR are related family names, but they are different capabilities and not the same change.", "right": "issue:43837"}, {"accept": false, "left": "issue:44121", "reason": "Weight-sparse transformer support and EUPE are unrelated model architectures/requests.", "right": "issue:45322"}, {"accept": false, "left": "issue:43837", "reason": "ASR support versus audio model addition are different feature requests, not one concrete duplicate.", "right": "issue:43909"}, {"accept": false, "left": "issue:43837", "reason": "Qwen3-ASR and EUPE target different model types and implementation paths.", "right": "issue:45322"}, {"accept": false, "left": "issue:37015", "reason": "NeoBERT and LFM2.5 Audio 1.5B are unrelated model additions.", "right": "issue:43909"}, {"accept": false, "left": "issue:43671", "reason": "Qwen3-TTS support and DeepSeek-OCR2 are different modalities and code paths.", "right": "issue:45177"}, {"accept": false, "left": "issue:37015", "reason": "NeoBERT and Qwen3-ASR are unrelated model requests.", "right": "issue:43837"}, {"accept": false, "left": "issue:43671", "reason": "Qwen3-TTS and Voxtral-4B-TTS-2603 are both TTS-related, but they are different models and not the same underlying request.", "right": "issue:45027"}, {"accept": false, "left": "issue:37015", "reason": "NeoBERT and OpenAI weight-sparse transformer support are different model families and implementation targets.", "right": "issue:44121"}, {"accept": false, "left": "issue:43837", "reason": "ASR support and sparse-transformer support are different features with different code paths.", "right": "issue:44121"}, {"accept": false, "left": "issue:43837", "reason": "Qwen3-ASR and DeepSeek-OCR2 are 
different modalities and model requests.", "right": "issue:45177"}, {"accept": false, "left": "issue:43671", "reason": "TTS support for Qwen3 and LFM2.5 Audio are related only broadly by audio, but not the same model/change.", "right": "issue:43909"}, {"accept": false, "left": "issue:43671", "reason": "Qwen3-TTS and circuit-sparsity transformer support are unrelated changes.", "right": "issue:44121"}, {"accept": false, "left": "issue:41211", "reason": "DEIMv2 and Qwen3-TTS are different model additions with different implementation needs.", "right": "issue:43671"}, {"accept": false, "left": "issue:36817", "reason": "EuroBert config support and DEIMv2 are unrelated model requests.", "right": "issue:41211"}, {"accept": false, "left": "issue:37015", "reason": "NeoBERT and ModernVBERT are distinct model requests; similar naming does not make them duplicates.", "right": "issue:42503"}, {"accept": false, "left": "issue:36817", "reason": "EuroBert config support and Qwen3-TTS are unrelated model changes.", "right": "issue:43671"}, {"accept": false, "left": "issue:37015", "reason": "NeoBERT and Voxtral-4B-TTS-2603 are different models and tasks.", "right": "issue:45027"}, {"accept": false, "left": "issue:42738", "reason": "BERT-like models with RoPE and Voxtral TTS are not the same concrete request.", "right": "issue:45027"}, {"accept": false, "left": "issue:43837", "reason": "Qwen3-ASR and Voxtral TTS are different model support requests.", "right": "issue:45027"}, {"accept": false, "left": "issue:42503", "reason": "ModernVBERT models and Qwen3-ASR are different feature requests.", "right": "issue:43837"}, {"accept": false, "left": "issue:42503", "reason": "ModernVBERT models and BERT-like models with RoPE overlap only thematically, not as duplicates.", "right": "issue:42738"}, {"accept": false, "left": "issue:42503", "reason": "ModernVBERT models and Qwen3-TTS are unrelated model additions.", "right": "issue:43671"}], "summary": "This cluster is a set of distinct 
model-request issues, not duplicates: different model families, different modalities (text, audio, ASR/TTS, OCR, sparse transformer, perception encoder), and no shared concrete code change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43909|issue:44205", "issue:43671|issue:43837", "issue:44121|issue:45322", "issue:43837|issue:43909", "issue:43837|issue:45322", "issue:37015|issue:43909", "issue:43671|issue:45177", "issue:37015|issue:43837", "issue:43671|issue:45027", "issue:37015|issue:44121", "issue:43837|issue:44121", "issue:43837|issue:45177", "issue:43671|issue:43909", "issue:43671|issue:44121", "issue:41211|issue:43671", "issue:36817|issue:41211", "issue:37015|issue:42503", "issue:36817|issue:43671", "issue:37015|issue:45027", "issue:42738|issue:45027", "issue:43837|issue:45027", "issue:42503|issue:43837", "issue:42503|issue:42738", "issue:42503|issue:43671"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 974, "estimated_input_tokens": 359, "item_count": 2, "node_count": 2, "serialized_chars": 1434, "soft_pair_count": 1}, "cached_at": "2026-04-14T21:27:55Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "0907a1b34a1ba66651e2dae7e946095d177a986876b3f96e5f0ce3191e950f26", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:42503", "issue:45027"], "result": {"analyst_result": {"best_issue_reason": "issue #45027 is the best issue candidate because it is open, specific, and currently engaged; #42503 is a separate older request for a different model family.", "best_pr_reason": null, "canonical_issue_reason": "issue #45027 is the more active and current request, with ongoing 
discussion and inbound references; it is the better representative for this cluster if one must be chosen.", "canonical_pr_reason": null, "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "issue:42503", "reason": "Different model support requests for distinct models (ModernVBERT vs Voxtral-4B-TTS-2603); no shared underlying bug or code change.", "right": "issue:45027"}], "summary": "These are two unrelated model-support requests: one for ModernVBERT and one for Voxtral-4B-TTS-2603. They do not describe the same underlying bug or change, so they should not be clustered as duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:42503|issue:45027"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 3402, "estimated_input_tokens": 1573, "item_count": 6, "node_count": 6, "serialized_chars": 6291, "soft_pair_count": 8}, "cached_at": "2026-04-14T21:28:14Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "f9f0e8ed02afd5eb0d4b7d7d1d88c6b90442e4d09f423a4ad602b3e0655ea657", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:37934", "issue:42936", "issue:43240", "issue:43668", "issue:43813", "issue:44031"], "result": {"analyst_result": {"best_issue_reason": "#44031 is the best standalone issue to prioritize because it has the widest apparent impact and clearest regression framing.", "best_pr_reason": null, "canonical_issue_reason": "#44031 is the strongest representative issue in this set: it describes a broad, user-visible regression with a clear version boundary and affects many tokenizer code paths.", "canonical_pr_reason": null, "confidence": 0.9, 
"soft_edge_verdicts": [{"accept": false, "left": "issue:37934", "reason": "Different models and different failure modes: a possible Llama4 normalization design question vs. a ModernBERT config type-hint mistake.", "right": "issue:43668"}, {"accept": false, "left": "issue:42936", "reason": "Mask2Former ignore_value handling is a model-specific parameter bug, while #43240 is a generic loss kwargs plumbing issue.", "right": "issue:43240"}, {"accept": false, "left": "issue:42936", "reason": "Unrelated subsystems and symptoms: Mask2Former training config bug vs. tokenizer regex warning regression.", "right": "issue:44031"}, {"accept": false, "left": "issue:42936", "reason": "Mask2Former ignore_value behavior and a quantization attribute typo are different bugs with no shared code-path indicated.", "right": "issue:43813"}, {"accept": false, "left": "issue:42936", "reason": "Different models and problem classes: unused ignore_value vs. incorrect type annotation for norm_eps.", "right": "issue:43668"}, {"accept": false, "left": "issue:43240", "reason": "Generic loss kwargs propagation bug is not the same as a config type-hint error; no overlapping concrete code path is shown.", "right": "issue:43668"}, {"accept": false, "left": "issue:43668", "reason": "ModernBERTConfig type hint issue and tokenizer regex warning regression are unrelated changes in different parts of the library.", "right": "issue:44031"}, {"accept": false, "left": "issue:43668", "reason": "A type-hint problem in ModernBERTConfig is not the same as a typo in a quantization attribute name.", "right": "issue:43813"}], "summary": "These are six distinct issue reports touching different models/subsystems and bug types (norm math, unused config, kwargs plumbing, type hints, typo, tokenizer regex warning). 
None looks like a true duplicate of another."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:37934|issue:43668", "issue:42936|issue:43240", "issue:42936|issue:44031", "issue:42936|issue:43813", "issue:42936|issue:43668", "issue:43240|issue:43668", "issue:43668|issue:44031", "issue:43668|issue:43813"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 996, "estimated_input_tokens": 370, "item_count": 2, "node_count": 2, "serialized_chars": 1479, "soft_pair_count": 1}, "cached_at": "2026-04-14T21:28:26Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "490dd9e57cc2a71b4f003f28c61302b00a02e57d650acd918673cdaeac49c13b", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:41084", "issue:43037"], "result": {"analyst_result": {"best_issue_reason": "If one issue must be picked as the better representative, #43037 is more concrete and actionable, but it is not a duplicate of #41084.", "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "issue:41084", "reason": "Different underlying requests with no shared code-path evidence: block decoding behavior vs model/version support for DeepSeek v3.2.", "right": "issue:43037"}], "summary": "The two issues are about different topics: one is a vague request for block decoding, the other is a specific feature request for DeepSeek v3.2 support. 
They do not look like duplicates or the same underlying change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:41084|issue:43037"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 1048, "estimated_input_tokens": 396, "item_count": 2, "node_count": 2, "serialized_chars": 1582, "soft_pair_count": 1}, "cached_at": "2026-04-14T21:28:42Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "a3558157ccd8cdd6bbf12299062678a5b42a68cf325739a082623274f863d7aa", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:43598", "issue:43600"], "result": {"analyst_result": {"best_issue_reason": "If one must be chosen as the closest cluster representative, issue 43598 is slightly more specific to Trainer behavior, but neither issue is a strong duplicate representative for the other.", "best_pr_reason": null, "canonical_issue_reason": "No single canonical issue: the titles and described problems are different enough that they should not be merged as duplicates.", "canonical_pr_reason": null, "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "issue:43598", "reason": "Reject: `remove_unused_column` customizability and FLOPS estimation for custom trainers are separate Trainer concerns with different symptoms, code paths, and likely fixes.", "right": "issue:43600"}], "summary": "These are two distinct closed issues that only share a loose Trainer/custom-trainer theme: one is about `remove_unused_column` customizability, the other about FLOPS estimation for custom trainers. 
They do not describe the same underlying bug or change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43598|issue:43600"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 1062, "estimated_input_tokens": 403, "item_count": 2, "node_count": 2, "serialized_chars": 1611, "soft_pair_count": 1}, "cached_at": "2026-04-14T21:28:57Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "69d4838ba3676230573b0aae2e6e35d4d0b02cd31fbfe95d3fc364f429b26c3a", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:43684", "issue:44405"], "result": {"analyst_result": {"best_issue_reason": "Issue 43684 is the broader, earlier request and is the better representative if one issue must be chosen, but it should not subsume 44405.", "best_pr_reason": null, "canonical_issue_reason": "No single canonical issue; the requests target different model families and different auto-model classes.", "canonical_pr_reason": null, "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "issue:43684", "reason": "Different underlying changes: Qwen3-Omni model registration for generic/conditional-generation auto classes vs Qwen3.5 sequence-classification support. They do not look like the same bug or fix.", "right": "issue:44405"}], "summary": "These are not duplicates: one requests Qwen3-Omni registration for AutoModel/AutoModelForConditionalGeneration, the other requests AutoModelForSequenceClassification support for Qwen3.5. 
Different model families and different heads."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43684|issue:44405"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2388, "estimated_input_tokens": 1066, "item_count": 4, "node_count": 4, "serialized_chars": 4264, "soft_pair_count": 6}, "cached_at": "2026-04-14T21:29:17Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "8390bab7b5cb8dce444eb5039eabd011a3dad730616e1e9b3adc1201189ac0ee", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:43835", "issue:43845", "issue:43846", "issue:43859"], "result": {"analyst_result": {"best_issue_reason": "43845 is the strongest issue to keep as the cluster representative: earliest timestamp, same generic title as the other huggingface issues, and no evidence that 43835 is related.", "best_pr_reason": null, "canonical_issue_reason": "Issue 43845 is the earliest of the nearly identical \u2018huggingface \u2019 reports, so it is the best canonical representative within that subgroup.", "canonical_pr_reason": null, "confidence": 0.58, "soft_edge_verdicts": [{"accept": false, "left": "issue:43835", "reason": "Only weak title similarity; \u2018Fett\u2019 and \u2018huggingface \u2019 do not indicate the same underlying bug, and there are no shared targets/filenames or body clues.", "right": "issue:43845"}, {"accept": false, "left": "issue:43835", "reason": "Only weak title similarity; \u2018Fett\u2019 is unrelated to the generic huggingface reports, with no supporting evidence of a shared bug.", "right": "issue:43846"}, {"accept": false, "left": "issue:43835", "reason": "Only weak title similarity; 
there is no concrete evidence connecting \u2018Fett\u2019 to the huggingface issue reports.", "right": "issue:43859"}, {"accept": false, "left": "issue:43845", "reason": "Same generic title, but the bodies are too sparse to verify the same underlying bug; title similarity alone is not enough.", "right": "issue:43846"}, {"accept": false, "left": "issue:43845", "reason": "Both are titled \u2018huggingface \u2019, but there is no concrete description, target, or code-path evidence to confirm they are duplicates.", "right": "issue:43859"}, {"accept": false, "left": "issue:43846", "reason": "Both are titled \u2018huggingface \u2019, but the reports are too underspecified to safely merge as the same issue.", "right": "issue:43859"}], "summary": "The cluster contains one unrelated issue (\u201cFett\u201d) and three very weakly specified issues with the same generic title (\u201chuggingface \u201d). There isn\u2019t enough evidence to merge any pair as confirmed duplicates, but 43845 is the best representative among the similar huggingface issues because it is the earliest one."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:43835|issue:43845", "issue:43835|issue:43846", "issue:43835|issue:43859", "issue:43845|issue:43846", "issue:43845|issue:43859", "issue:43846|issue:43859"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 3154, "estimated_input_tokens": 1449, "item_count": 5, "node_count": 5, "serialized_chars": 5794, "soft_pair_count": 9}, "cached_at": "2026-04-14T21:29:31Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "f2844d3d941e9f9175ea562da243680407ea1db7a03b11511901e534fe09f3a8", 
"prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:44371", "issue:44418", "issue:44572", "issue:44573", "issue:44937"], "result": {"analyst_result": {"best_issue_reason": "issue:44418 is the least ambiguous and most human-readable issue in the cluster, but the cluster is too noisy to strongly recommend any true canonical issue.", "best_pr_reason": null, "canonical_issue_reason": "issue:44418 is the most descriptive of the set; the others are either '' placeholders or similarly low-information titles, so it is the best available anchor if one must be chosen.", "canonical_pr_reason": null, "confidence": 0.86, "soft_edge_verdicts": [{"accept": false, "left": "issue:44371", "reason": "Both are titled '', but there is no body/content evidence that they report the same underlying issue or change; treat as separate spam submissions.", "right": "issue:44572"}, {"accept": false, "left": "issue:44371", "reason": "Identical spam-like titles are not enough to establish a duplicate; no substantive problem statement is available.", "right": "issue:44573"}, {"accept": false, "left": "issue:44371", "reason": "One is a spam placeholder and the other is a generic documentation suggestion; they do not appear to be the same underlying issue.", "right": "issue:44418"}, {"accept": false, "left": "issue:44371", "reason": "The titles are unrelated ('' vs 'Check out \"Google LLC\"'); no shared bug/change is evident.", "right": "issue:44937"}, {"accept": false, "left": "issue:44418", "reason": "Generic documentation suggestion versus spam placeholder; no evidence they describe the same request or defect.", "right": "issue:44572"}, {"accept": false, "left": "issue:44418", "reason": "Generic documentation suggestion versus spam placeholder; not the same underlying issue.", "right": "issue:44573"}, {"accept": false, "left": "issue:44572", "reason": "Both are titled '', but there is no actual bug/change description to confirm they are duplicates rather than separate spam reports.", 
"right": "issue:44573"}, {"accept": false, "left": "issue:44572", "reason": "Spam placeholder and an unrelated promotional-looking title; no basis for deduplication.", "right": "issue:44937"}, {"accept": false, "left": "issue:44573", "reason": "Spam placeholder and an unrelated promotional-looking title; no evidence of a shared underlying issue.", "right": "issue:44937"}], "summary": "These are low-signal, likely unrelated placeholder/spam issues. The only clear similarity is identical spam-like titles on a few issues, but there is no substantive bug/change evidence to justify deduping."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["issue:44371|issue:44572", "issue:44371|issue:44573", "issue:44371|issue:44418", "issue:44371|issue:44937", "issue:44418|issue:44572", "issue:44418|issue:44573", "issue:44572|issue:44573", "issue:44572|issue:44937", "issue:44573|issue:44937"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 1080, "estimated_input_tokens": 412, "item_count": 2, "node_count": 2, "serialized_chars": 1646, "soft_pair_count": 1}, "cached_at": "2026-04-14T21:29:52Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "99f53a8ce71f8a07655e27a5a4bc59969834dcdb7b195ac52d68f35a50765792", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:45145", "issue:45151"], "result": {"analyst_result": {"best_issue_reason": "Issue #45145 is the strongest representative of the cluster since it is the earliest submission and has the most linkage activity among the two.", "best_pr_reason": null, "canonical_issue_reason": "Issue #45145 is the better canonical choice because it is earlier and already 
has an inbound reference, while #45151 appears to be a near-identical duplicate.", "canonical_pr_reason": null, "confidence": 0.99, "soft_edge_verdicts": [{"accept": true, "left": "issue:45145", "reason": "Same title and highly similar content; no evidence of distinct underlying bug or change.", "right": "issue:45151"}], "summary": "These two closed issues appear to be duplicates of the same report: identical title, very similar body length, and no differentiating targets or filenames."}, "error_kind": null, "error_message": null, "evaluator_result": {"accept": true, "feedback": "The assessment is grounded in the packet: both issues have identical titles, very similar body lengths, no filenames or explicit targets, and issue 45145 is earlier with the only inbound reference. The duplicate/near-duplicate conclusion is phrased cautiously enough, and the soft-edge verdict is consistent with the available similarity signal."}, "evaluator_used": true, "retried": false}, "soft_pairs": ["issue:45145|issue:45151"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 898, "estimated_input_tokens": 321, "item_count": 2, "node_count": 2, "serialized_chars": 1284, "soft_pair_count": 0}, "cached_at": "2026-04-14T21:29:58Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "33b797cab310b5d73dd7dfc83857ab5951fc3c891e63bc5d2a8ece07749a11dc", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["issue:45295", "issue:45373"], "result": {"analyst_result": {"best_issue_reason": "#45373 is the best single issue to track because it is more specific, easier to act on, and fully covers the broader request in #45295.", "best_pr_reason": null, "canonical_issue_reason": "#45373 is the clearer canonical 
issue: it names the exact missing class (`Gemma4ForSequenceClassification`), notes the module gap, and relates it directly to the existing Gemma 2/3 implementation.", "canonical_pr_reason": null, "confidence": 0.97, "soft_edge_verdicts": [], "summary": "Both issues request the same missing Gemma 4 sequence-classification support. Issue #45373 is the more specific, actionable report and appears to subsume #45295."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": [], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 17236, "estimated_input_tokens": 8490, "item_count": 12, "node_count": 12, "serialized_chars": 33958, "soft_pair_count": 20}, "cached_at": "2026-04-14T21:30:31Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "7774b6573d0e43630631c3b9e7bed087913dcb3bd038b89e15efa3da822e860b", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:39895", "pull_request:42668", "pull_request:43251", "pull_request:43254", "pull_request:43424", "pull_request:43823", "pull_request:43995", "pull_request:44827", "pull_request:45078", "pull_request:45170", "pull_request:45213", "pull_request:45433"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 43251 is the best overall PR in the duplicate pair because it is the clearer, more mature version of the same fix and should subsume 43254.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 43251 is the better canonical duplicate for the cross-entropy kwargs fix: it targets the same function, explicitly links the issue, and has the more complete review history. 
PR 43254 is a near-identical alternate patch.", "confidence": 0.95, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:39895", "reason": "Unrelated changes: new VideoPrism model/docs vs an ExecuTorch dynamic-shapes test.", "right": "pull_request:43424"}, {"accept": false, "left": "pull_request:42668", "reason": "Different workstreams: processor-from-pretrained robustness vs adding MobileLLM model support.", "right": "pull_request:43823"}, {"accept": false, "left": "pull_request:42668", "reason": "Both touch auto-processing/tokenization, but one is a processor-loading robustness change and the other is tokenizer conversion/error handling.", "right": "pull_request:45078"}, {"accept": false, "left": "pull_request:43251", "reason": "Loss utility kwargs fix and a new model addition are not the same underlying change.", "right": "pull_request:43823"}, {"accept": false, "left": "pull_request:43251", "reason": "Cross-entropy helper fix vs Mistral4 test/integration work; different concrete problems.", "right": "pull_request:44827"}, {"accept": false, "left": "pull_request:43424", "reason": "ExecuTorch export test and Mistral4 test fixes are unrelated.", "right": "pull_request:44827"}, {"accept": false, "left": "pull_request:43424", "reason": "Dynamic-shape export test vs MobileLLM implementation; not the same bug/change.", "right": "pull_request:43823"}, {"accept": false, "left": "pull_request:43823", "reason": "MobileLLM model scaffolding and tokenizer auto behavior are separate changes.", "right": "pull_request:45078"}, {"accept": false, "left": "pull_request:43823", "reason": "New model addition vs Falcon output-interface refactor; different code paths and goals.", "right": "pull_request:43995"}, {"accept": false, "left": "pull_request:43823", "reason": "MobileLLM addition and Mistral4 test fixes are unrelated.", "right": "pull_request:44827"}, {"accept": false, "left": "pull_request:43823", "reason": "New model support vs a model-creation skill/doc PR; not 
the same underlying change.", "right": "pull_request:45213"}, {"accept": false, "left": "pull_request:43995", "reason": "Falcon refactor and Mistral4 test fixes do not address the same bug.", "right": "pull_request:44827"}, {"accept": false, "left": "pull_request:43995", "reason": "Falcon model refactoring and tokenizer auto conversion logic are unrelated.", "right": "pull_request:45078"}, {"accept": false, "left": "pull_request:43995", "reason": "Model refactor vs meta skill/doc PR; no shared concrete code fix.", "right": "pull_request:45213"}, {"accept": false, "left": "pull_request:44827", "reason": "Mistral4 test fixes and SonicMoe kernel integration are separate changes in different subsystems.", "right": "pull_request:45433"}, {"accept": false, "left": "pull_request:44827", "reason": "Mistral4 test fixes vs `layrnorm`->`layernorm` renaming/cross-model conversion cleanup.", "right": "pull_request:45170"}, {"accept": false, "left": "pull_request:44827", "reason": "Different bugs: Mistral4 test cleanup vs tokenizer auto selection/error handling.", "right": "pull_request:45078"}, {"accept": false, "left": "pull_request:45078", "reason": "Tokenizer auto behavior fix and a model-creation skill are unrelated.", "right": "pull_request:45213"}, {"accept": false, "left": "pull_request:44827", "reason": "Mistral4 test fixes vs meta skill/doc PR; not mergeable as one change.", "right": "pull_request:45213"}, {"accept": true, "left": "pull_request:43251", "reason": "Both PRs patch the same `fixed_cross_entropy` call to pass supported kwargs (`weight` and `label_smoothing`) into `cross_entropy`.", "right": "pull_request:43254"}], "summary": "Only one soft pair looks like a true duplicate: the two `fixed_cross_entropy` PRs both implement the same kwargs passthrough to `nn.functional.cross_entropy`. 
All other pairs are broader-area similarities rather than the same concrete bug/change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:39895|pull_request:43424", "pull_request:42668|pull_request:43823", "pull_request:42668|pull_request:45078", "pull_request:43251|pull_request:43823", "pull_request:43251|pull_request:44827", "pull_request:43424|pull_request:44827", "pull_request:43424|pull_request:43823", "pull_request:43823|pull_request:45078", "pull_request:43823|pull_request:43995", "pull_request:43823|pull_request:44827", "pull_request:43995|pull_request:45078", "pull_request:43995|pull_request:45213", "pull_request:43995|pull_request:44827", "pull_request:44827|pull_request:45433", "pull_request:44827|pull_request:45170", "pull_request:44827|pull_request:45078", "pull_request:45078|pull_request:45213", "pull_request:44827|pull_request:45213", "pull_request:43823|pull_request:45213", "pull_request:43251|pull_request:43254"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 3870, "estimated_input_tokens": 1807, "item_count": 2, "node_count": 2, "serialized_chars": 7226, "soft_pair_count": 1}, "cached_at": "2026-04-14T21:30:46Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "55dff05834894697df0e314e22de639398601899ed97b9e1e16b8a886e9df1a7", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:40546", "pull_request:42781"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #40546 is the best representative of the cluster since it establishes the main VibeVoice model support; #42781 is a later draft for realtime/acoustic-tokenizer 
additions rather than the same change.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #40546 is the better canonical PR because it is the core, non-draft VibeVoice implementation with substantial review activity and broader package/auto-doc integration.", "confidence": 0.89, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:40546", "reason": "Both concern VibeVoice, but #40546 implements the base model while #42781 adds realtime support and an acoustic tokenizer. They share subsystem files because #42781 builds on the earlier work, not because they are the same concrete change, so this should not be merged as a duplicate.", "right": "pull_request:42781"}], "summary": "Two related VibeVoice pull requests overlap heavily in model infrastructure, but one is the base integration and the other is a separate realtime extension. They should not be deduplicated."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:40546|pull_request:42781"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2594, "estimated_input_tokens": 1169, "item_count": 2, "node_count": 2, "serialized_chars": 4675, "soft_pair_count": 1}, "cached_at": "2026-04-14T21:30:57Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "6e275ebc8b77f989d023603b90b6bad827877771da0d14f54d077bbedfee95e7", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:40755", "pull_request:41224"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #41224 is the more concrete and established change, with substantial review activity and a focused model-support addition; #40755 is a 
separate, unrelated feature PR.", "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.98, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:40755", "reason": "Different models, different codepaths, and different feature goals (TimesFM covariates vs DINOv3 image classification); not the same underlying change.", "right": "pull_request:41224"}], "summary": "These two PRs are unrelated feature additions: one adds TimesFM covariate forecasting support, the other adds DINOv3 image classification support. They share only a broad Transformers context, not the same underlying bug or change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:40755|pull_request:41224"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 3618, "estimated_input_tokens": 1681, "item_count": 2, "node_count": 2, "serialized_chars": 6722, "soft_pair_count": 1}, "cached_at": "2026-04-14T21:31:10Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "9944ed602e915a1efc520372e61f09dcc2f77887105d5401b8b3916b27ce0ac8", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:41797", "pull_request:45186"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:41797", "reason": "Different model integrations with distinct filenames, docs, and test suites; shared auto-registration/repo plumbing is generic and does not indicate the same underlying bug or change.", "right": "pull_request:45186"}], "summary": "The two 
pull requests are separate model additions for different architectures (DeepSeek OCR vs Isaac). They share repository-wide model registration files, but the concrete code, docs, and tests are unrelated, so this is not a duplicate pair."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:41797|pull_request:45186"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2960, "estimated_input_tokens": 1352, "item_count": 2, "node_count": 2, "serialized_chars": 5407, "soft_pair_count": 1}, "cached_at": "2026-04-14T21:31:23Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "efa01757b4c470a89d5647a8e39b4e74139b718f2ee311b85c69053fcde5fec0", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:42166", "pull_request:45426"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.98, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:42166", "reason": "Different models and different feature sets; the shared files are only generic registry/auto-config updates, not evidence of the same underlying code change or bug fix.", "right": "pull_request:45426"}], "summary": "The two pull requests are unrelated feature additions: one adds InternVLFlash, the other adds AXK1. 
Their overlap is limited to shared registration/auto-generated model plumbing, which is not enough to treat them as the same change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:42166|pull_request:45426"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 6638, "estimated_input_tokens": 3191, "item_count": 5, "node_count": 5, "serialized_chars": 12764, "soft_pair_count": 8}, "cached_at": "2026-04-14T21:32:55Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "21e58977b4d391a3e8b3f32cbca1c0943bc81bce95360e69edbd21365f34d5b9", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:42227", "pull_request:43291", "pull_request:43665", "pull_request:44815", "pull_request:45403"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #42227 is the best overall PR candidate because it is the most concrete, least noisy, and most clearly tied to a specific user-facing bug.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #42227 is the cleanest representative: it has an explicit issue link, a focused Whisper pipeline bug fix, and a small targeted diff with matching tests.", "confidence": 0.94, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:42227", "reason": "Both touch Whisper, but they fix different paths: return-language forwarding in the ASR pipeline versus decode/timestamp handling in the tokenizer. Not the same bug.", "right": "pull_request:43291"}, {"accept": false, "left": "pull_request:42227", "reason": "One is a Whisper ASR bug fix; the other is unrelated test-file edits for CLIP/ViT. 
No shared concrete problem.", "right": "pull_request:43665"}, {"accept": false, "left": "pull_request:42227", "reason": "Whisper return-language plumbing and FP8/dequant loading changes are unrelated subsystems and bugs.", "right": "pull_request:44815"}, {"accept": false, "left": "pull_request:43291", "reason": "Both involve tests, but one is Whisper tokenizer/decode behavior and the other is a test helper refactor for common model inference checks. Different underlying issues.", "right": "pull_request:45403"}, {"accept": false, "left": "pull_request:43291", "reason": "Whisper decode logic and CLIP/ViT test edits do not address the same code-path problem.", "right": "pull_request:43665"}, {"accept": false, "left": "pull_request:43291", "reason": "Tokenizer/decoder changes for Whisper are unrelated to FP8 quantization/dequant fixes.", "right": "pull_request:44815"}, {"accept": false, "left": "pull_request:43665", "reason": "Both are test-oriented, but they target different models and different failures; not the same change.", "right": "pull_request:45403"}, {"accept": false, "left": "pull_request:43665", "reason": "A bogus-looking test-only CLIP/ViT edit is not the same as the Mistral4 FP8 dequant fix.", "right": "pull_request:44815"}], "summary": "The cluster is mostly unrelated PRs that all happen to be weakly similar by title or broad model/test area. 
No pair looks like the same underlying change with enough specificity to merge as duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:42227|pull_request:43291", "pull_request:42227|pull_request:43665", "pull_request:42227|pull_request:44815", "pull_request:43291|pull_request:45403", "pull_request:43291|pull_request:43665", "pull_request:43291|pull_request:44815", "pull_request:43665|pull_request:45403", "pull_request:43665|pull_request:44815"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 3224, "estimated_input_tokens": 1484, "item_count": 2, "node_count": 2, "serialized_chars": 5933, "soft_pair_count": 1}, "cached_at": "2026-04-14T21:34:28Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "3d27c23169e4a7b8f787ef193f6037fa8c646d6c33eb35445b4fcd1781675f0b", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:42975", "pull_request:42976"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 42976 is the best single representative because it covers the wider maintenance update and has the more general scope/title, whereas PR 42975 is centered on Node 24 compatibility for a narrower set of actions.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 42976 is the broader umbrella change: it upgrades multiple GitHub Actions to newer versions across several workflows, and its title is the most general fit for a cluster representative.", "confidence": 0.89, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:42975", "reason": "Do not merge: 42975 updates checkout/cache/upload-artifact for Node 24 compatibility, 
while 42976 updates docker/build-push-action and other actions to latest versions. They touch similar workflow files, but they are different action migrations with different concrete purposes and could be merged separately.", "right": "pull_request:42976"}], "summary": "Both items are maintenance PRs updating GitHub Actions workflow dependencies, but they target different action families and different upgrade goals. They overlap in affected workflow files, yet they do not look like the same concrete change set."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:42975|pull_request:42976"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 5330, "estimated_input_tokens": 2537, "item_count": 3, "node_count": 3, "serialized_chars": 10145, "soft_pair_count": 3}, "cached_at": "2026-04-14T21:34:47Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "2b204e08a71e099374e684e2679cdd3a2ce6b0c51219c9ca75e2b9460e7965e7", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:42978", "pull_request:43448", "pull_request:43451"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "If one must be chosen as a representative, PR #42978 is the most self-contained due to its explicit linked issue and focused single-model scope, but it is not a duplicate of the others.", "canonical_issue_reason": null, "canonical_pr_reason": "No canonical PR for deduping here; these PRs are about different models and are not mergeable into a single fix/change.", "confidence": 0.98, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:42978", "reason": "ViT NEPA vs Molmo are 
different models with different implementation files and docs; shared auto-registration/docs boilerplate is not enough to treat them as the same change.", "right": "pull_request:43448"}, {"accept": false, "left": "pull_request:42978", "reason": "ViT NEPA and Molmo2 are unrelated model additions. They touch some of the same Transformers plumbing, but they fix/add different concrete models and codepaths.", "right": "pull_request:43451"}, {"accept": false, "left": "pull_request:43448", "reason": "Molmo and Molmo2 are adjacent names but still distinct model families with separate source trees and documentation; this is not one underlying bug or mergeable change.", "right": "pull_request:43451"}], "summary": "All three pull requests are separate model additions, not duplicates: ViT NEPA, Molmo, and Molmo2 each introduce different model families and distinct codepaths. The only overlap is expected boilerplate around docs and auto-registration."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:42978|pull_request:43448", "pull_request:42978|pull_request:43451", "pull_request:43448|pull_request:43451"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2538, "estimated_input_tokens": 1141, "item_count": 2, "node_count": 2, "serialized_chars": 4563, "soft_pair_count": 1}, "cached_at": "2026-04-14T21:34:55Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "bdff12eccb9772a9094ed9512a55ae0d0c72925841eea798f77ba7cd761e80e7", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43488", "pull_request:43532"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": 
"PR 43532 is the strongest standalone representative: it has more discussion/review activity and a clearer, broader code-path change than the repo-bot test PR.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 43532 is the better representative because it has broader review activity and touches the main workflow/notification path, making it more informative as the cluster exemplar.", "confidence": 0.98, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43488", "reason": "Reject: these PRs do not fix the same concrete problem. 43488 is a packaging/version/dependency test change, while 43532 changes CI/workflow and notification-related code. The shared ViT test filename appears incidental and they would not plausibly merge into one PR.", "right": "pull_request:43532"}], "summary": "Two open PRs, but they address different areas: one is a repo-bot/version/dependency test change, the other is a workflow/notification diff-display change. The only overlap is a shared ViT test file, which looks incidental rather than the same underlying fix."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43488|pull_request:43532"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 3298, "estimated_input_tokens": 1521, "item_count": 2, "node_count": 2, "serialized_chars": 6083, "soft_pair_count": 1}, "cached_at": "2026-04-14T21:35:07Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "b3f01127834c1099f1850f70f6ae4244e52b509e62a46c5a78178d1f4327179a", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43612", "pull_request:43613"], "result": {"analyst_result": 
{"best_issue_reason": null, "best_pr_reason": "Neither PR is a good canonical representative for a duplicate cluster because they implement different features rather than the same fix; if forced, PR 43613 is broader, but not a duplicate of 43612.", "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.97, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43612", "reason": "Reject: they add different pipelines (concept vs visual segmentation) with different task docs, model integrations, and test files; shared infra changes are not enough to treat them as the same PR.", "right": "pull_request:43613"}], "summary": "Two open PRs add different segmentation pipelines: one for Promptable Concept Segmentation and one for Promptable Visual Segmentation. They share documentation and registry plumbing, but they target different tasks, model sets, and code paths, so they are not duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43612|pull_request:43613"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2402, "estimated_input_tokens": 1073, "item_count": 2, "node_count": 2, "serialized_chars": 4291, "soft_pair_count": 1}, "cached_at": "2026-04-14T21:35:17Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "92fd629ef5ecb4476ed8d2270e8f529d649bd3b9695ae45c3e1a562d1919c775", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43651", "pull_request:43663"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": null, "canonical_issue_reason": null, "canonical_pr_reason": null, "confidence": 0.98, 
"soft_edge_verdicts": [{"accept": false, "left": "pull_request:43651", "reason": "Different code paths and different goals: GA loss scaling vs. dataloader column/signature filtering. Shared file alone is not enough to treat them as the same PR.", "right": "pull_request:43663"}], "summary": "These two PRs only overlap in `src/transformers/trainer.py` but change different trainer behaviors: one adds a hook for gradient-accumulation loss scaling, the other refactors signature-column filtering for custom trainers. They are not the same underlying change and should not be merged as duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43651|pull_request:43663"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 3252, "estimated_input_tokens": 1498, "item_count": 3, "node_count": 3, "serialized_chars": 5992, "soft_pair_count": 2}, "cached_at": "2026-04-14T21:36:24Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "7dd67e9306aa0c9033f983eaa260c8ff8804830a5ad5179ac659ddd5b48fb771", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43656", "pull_request:43836", "pull_request:43842"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #43842 best represents the underlying bug because it directly names the failure mode, stays narrowly scoped to the affected code path, and is easier to merge as the canonical fix than the broader annotation cleanup in #43836.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #43842 is the cleanest, most targeted fix for the concrete runtime failure: it explicitly addresses the `TypeAdapter` 
NameError when `pydantic` is absent, with a minimal change set in `serve.py`.", "confidence": 0.73, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43656", "reason": "Both touch `serve.py`, but #43656 looks like a broader annotation/future-annotations cleanup around `TimedModel`, not the same specific `TypeAdapter`-missing-`pydantic` NameError fixed by #43842.", "right": "pull_request:43842"}, {"accept": true, "left": "pull_request:43836", "reason": "Both are alternate fixes for the same `TypeAdapter` name-resolution problem in `serve.py` and share the same issue target; they look like overlapping implementations of the same underlying runtime bug.", "right": "pull_request:43842"}], "summary": "Two PRs appear to be alternate fixes for the same `TypeAdapter`/annotation NameError in `src/transformers/cli/serve.py`; the third PR looks like a separate typing-related cleanup in the same file rather than the same bug."}, "error_kind": null, "error_message": null, "evaluator_result": {"accept": true, "feedback": "The summary is grounded in the packet and stays conservative. The 43836/43842 pair does look like overlapping fixes for the same TypeAdapter NameError in `serve.py`, while 43656 appears broader and not clearly the same bugfix based on the diff preview. 
No overstatement stands out."}, "evaluator_used": true, "retried": false}, "soft_pairs": ["pull_request:43656|pull_request:43842", "pull_request:43836|pull_request:43842"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2560, "estimated_input_tokens": 1152, "item_count": 2, "node_count": 2, "serialized_chars": 4608, "soft_pair_count": 1}, "cached_at": "2026-04-14T21:36:45Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "a486ed5323032f99f7840a331d1736f23b689f0faab1a7000260db763a7511db", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43757", "pull_request:45118"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "#45118 is the best representative because it resolves the same user-facing GGUF loading problem with a fuller implementation, docs, and tests.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #45118 is the more complete GPT-OSS GGUF fix, explicitly supersedes #43757, and adds the actual loading support rather than a stopgap fallback.", "confidence": 0.88, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:43757", "reason": "Same underlying GPT-OSS GGUF loading path in the same utility file; #45118 expands and supersedes the workaround in #43757, so they are close enough to treat as duplicate-related.", "right": "pull_request:45118"}], "summary": "Both PRs address GPT-OSS GGUF loading; the second is a broader follow-up that supersedes the first by moving from a fallback workaround to full support."}, "error_kind": null, "error_message": null, "evaluator_result": {"accept": true, "feedback": "Accept. 
The packet supports the core claim: both PRs target the same GPT-OSS GGUF loading problem, they share `modeling_gguf_pytorch_utils.py`, and #45118 explicitly says it supersedes #43757 while adding fuller support. The summary and soft-edge verdict stay within the evidence and do not overstate it."}, "evaluator_used": true, "retried": false}, "soft_pairs": ["pull_request:43757|pull_request:45118"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2438, "estimated_input_tokens": 1091, "item_count": 2, "node_count": 2, "serialized_chars": 4362, "soft_pair_count": 1}, "cached_at": "2026-04-14T21:36:56Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "e7c674adcc031aa8883109a5dfc678b4407fb20b10764302092e384d031ba83a", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43779", "pull_request:43816"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 43779 is the best representative of the cluster because it directly implements the missing SwanLabCallback behavior; PR 43816 looks secondary/doc-focused.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 43779 is the stronger canonical PR because it contains the actual callback implementation to pass through id/resume kwargs, which is the core functional change.", "confidence": 0.9, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43779", "reason": "Same issue target and subsystem, but the previews show different scopes: 43779 changes callback initialization to support resume/id, while 43816 mainly adds docstring entries. 
Not the same concrete code-path change, so do not merge as duplicates.", "right": "pull_request:43816"}], "summary": "Both PRs target the same SwanLab resume/id feature and the same file, but one is the implementation change and the other appears to be mostly documentation/supporting text. They should not be merged as duplicates for triage."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43779|pull_request:43816"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 4624, "estimated_input_tokens": 2184, "item_count": 3, "node_count": 3, "serialized_chars": 8736, "soft_pair_count": 2}, "cached_at": "2026-04-14T21:37:07Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "3d087bcadfd9802305de9f04c4781d778c5a9c0c790fb8769116f95f4e264503", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43838", "pull_request:43973", "pull_request:45283"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 43838 is the strongest standalone representative because it introduces a complete new model integration and has the clearest issue linkage; the other PRs are narrower and unrelated to it.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 43838 is the most substantial and centralized change set, with broad model/auto/processor additions and an explicit issue target, so it is the best representative artifact in this mixed cluster.", "confidence": 0.98, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43838", "reason": "Different features and code paths: Qwen3-ASR model integration vs Qwen3.5 GGUF loading support. 
Same broad transformer ecosystem, but not the same underlying change.", "right": "pull_request:45283"}, {"accept": false, "left": "pull_request:43973", "reason": "Lfm2Audio model/docs addition and Qwen3.5 GGUF loading are unrelated changes in different model families with no shared concrete bug or fix.", "right": "pull_request:45283"}], "summary": "These are three unrelated pull requests: Qwen3-ASR support, Lfm2Audio model/docs support, and Qwen3.5 GGUF loading support. They touch different model families and different code paths, so this is not a duplicate cluster."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43838|pull_request:45283", "pull_request:43973|pull_request:45283"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 17644, "estimated_input_tokens": 8694, "item_count": 14, "node_count": 14, "serialized_chars": 34773, "soft_pair_count": 24}, "cached_at": "2026-04-14T21:38:44Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "4983703ddec7656295b939b42586d97440d7fca4469d8dae33518e2818429038", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43996", "pull_request:44007", "pull_request:44013", "pull_request:44018", "pull_request:44019", "pull_request:44044", "pull_request:44066", "pull_request:44068", "pull_request:44071", "pull_request:44072", "pull_request:44085", "pull_request:44086", "pull_request:44116", "pull_request:44722"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "44722 is the best representative PR in the duplicate-like subset because it is the broadest and most complete GPT-J output-tracing refactor in this 
set.", "canonical_issue_reason": null, "canonical_pr_reason": "44722 is the most complete/latest representative of the overlapping GPT-J output-tracing refactor work, with the GPT-J block change plus the copied CodeGen cleanup.", "confidence": 0.88, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43996", "reason": "Different model codepaths (CVT/FNet vs GPT-J); same tracking theme only, not the same change.", "right": "pull_request:44085"}, {"accept": false, "left": "pull_request:43996", "reason": "Different models and implementations; both are output-tracing refactors, but not the same underlying bug/change.", "right": "pull_request:44044"}, {"accept": true, "left": "pull_request:44066", "reason": "Both touch GPT-J output tracing in modeling_gptj.py and appear to refactor the same concrete code path.", "right": "pull_request:44085"}, {"accept": false, "left": "pull_request:44007", "reason": "ResNet and EfficientNet are separate model implementations; same refactor pattern, not a duplicate change.", "right": "pull_request:44072"}, {"accept": false, "left": "pull_request:44072", "reason": "EfficientNet vs GPT-J: unrelated model codepaths despite similar decorator-based refactor intent.", "right": "pull_request:44722"}, {"accept": false, "left": "pull_request:44066", "reason": "GPT-J vs EfficientNet are different concrete implementations, so this is not the same underlying fix.", "right": "pull_request:44072"}, {"accept": false, "left": "pull_request:44013", "reason": "MobileNetV2 and DeBERTa v2 are unrelated codepaths; only the refactor style matches.", "right": "pull_request:44044"}, {"accept": false, "left": "pull_request:44066", "reason": "GPT-J output tracing vs MGP-STR output tracing are different model changes, not one mergeable duplicate.", "right": "pull_request:44086"}, {"accept": false, "left": "pull_request:44066", "reason": "GPT-J and MPT are different models; same refactor family but not the same code-path problem.", "right": 
"pull_request:44071"}, {"accept": true, "left": "pull_request:44018", "reason": "Both are GPT-Neo output-tracing refactors on the same file/code path and look like alternate versions of the same change.", "right": "pull_request:44068"}, {"accept": false, "left": "pull_request:44066", "reason": "GPT-J and GPT-Neo are different model implementations, so these are not duplicates.", "right": "pull_request:44068"}, {"accept": false, "left": "pull_request:44018", "reason": "Different models (GPT-Neo vs GPT-J); similar refactor theme only.", "right": "pull_request:44066"}, {"accept": false, "left": "pull_request:44019", "reason": "ResNet and GPT-J are unrelated model implementations and do not fix the same code path.", "right": "pull_request:44722"}, {"accept": false, "left": "pull_request:44086", "reason": "MGP-STR vs GPT-J are different model codepaths; not the same underlying change.", "right": "pull_request:44722"}, {"accept": false, "left": "pull_request:44019", "reason": "ResNet vs GPT-J are different model implementations; shared output-tracing motif is too broad.", "right": "pull_request:44085"}, {"accept": false, "left": "pull_request:44018", "reason": "GPT-Neo and ResNet are separate model-specific refactors, not one duplicate fix.", "right": "pull_request:44019"}, {"accept": false, "left": "pull_request:44019", "reason": "ResNet vs MPT are unrelated concrete implementations.", "right": "pull_request:44071"}, {"accept": false, "left": "pull_request:44068", "reason": "GPT-Neo and Flaubert touch different model codepaths; same decorator migration theme only.", "right": "pull_request:44116"}, {"accept": false, "left": "pull_request:44068", "reason": "GPT-Neo vs GPT-J are different model-specific changes, so not duplicates.", "right": "pull_request:44722"}, {"accept": false, "left": "pull_request:44018", "reason": "GPT-Neo and Flaubert are separate model implementations and not the same bug/change.", "right": "pull_request:44116"}, {"accept": false, "left": 
"pull_request:44071", "reason": "MPT and GPT-J are different codepaths; no evidence they are the same underlying change.", "right": "pull_request:44085"}], "summary": "This cluster is mostly a theme cluster of output-tracing refactors across many different model implementations, not one duplicate bug. The only likely duplicate-style edges are within the GPT-Neo and GPT-J subgroups; the rest are same initiative but different code paths/models."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43996|pull_request:44085", "pull_request:43996|pull_request:44044", "pull_request:44066|pull_request:44085", "pull_request:44007|pull_request:44072", "pull_request:44072|pull_request:44722", "pull_request:44066|pull_request:44072", "pull_request:44013|pull_request:44044", "pull_request:44066|pull_request:44086", "pull_request:44066|pull_request:44071", "pull_request:44018|pull_request:44068", "pull_request:44066|pull_request:44068", "pull_request:44018|pull_request:44066", "pull_request:44019|pull_request:44722", "pull_request:44086|pull_request:44722", "pull_request:44019|pull_request:44085", "pull_request:44018|pull_request:44019", "pull_request:44019|pull_request:44071", "pull_request:44068|pull_request:44116", "pull_request:44068|pull_request:44722", "pull_request:44018|pull_request:44116", "pull_request:44071|pull_request:44722", "pull_request:44018|pull_request:44086", "pull_request:44018|pull_request:44085", "pull_request:44071|pull_request:44085"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 19372, "estimated_input_tokens": 9558, "item_count": 16, "node_count": 16, "serialized_chars": 38231, "soft_pair_count": 24}, "cached_at": "2026-04-14T21:39:21Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": 
"0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "c693dca202400388b55146b08377a974a429a33170da06e8b7ec21e50e015ba7", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43997", "pull_request:44017", "pull_request:44018", "pull_request:44056", "pull_request:44059", "pull_request:44068", "pull_request:44071", "pull_request:44073", "pull_request:44074", "pull_request:44076", "pull_request:44085", "pull_request:44086", "pull_request:44098", "pull_request:44101", "pull_request:44161", "pull_request:44722"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "44161 is the best single representative of this cluster because it most clearly embodies the shared refactor pattern and appears more complete than the smaller model-specific variants.", "canonical_issue_reason": null, "canonical_pr_reason": "44161 is the clearest representative of the theme: a substantial, self-contained standardized output-tracing refactor with an explicit Fixes #43979 link and broad implementation coverage.", "confidence": 0.82, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44068", "reason": "Both are output-tracing refactors, but they target different models (GPT-Neo vs MGP-STR) and different code paths.", "right": "pull_request:44086"}, {"accept": false, "left": "pull_request:44059", "reason": "GPT-2 and MGP-STR are separate model implementations; same refactor theme, not the same change.", "right": "pull_request:44086"}, {"accept": false, "left": "pull_request:44059", "reason": "These are model-specific refactors in different code paths (GPT-2 vs GPT-J/CodeGen), so they are not duplicates.", "right": "pull_request:44722"}, {"accept": false, "left": "pull_request:44085", "reason": "GPT-J output tracing and MGP-STR output tracing are distinct model changes, not one underlying fix.", "right": "pull_request:44086"}, {"accept": false, "left": 
"pull_request:44098", "reason": "ViLT and Flaubert/XLM are unrelated model implementations; the similarity is only the standardized refactor pattern.", "right": "pull_request:44101"}, {"accept": false, "left": "pull_request:44018", "reason": "GPT-Neo and MPT are different model code paths; both refactor output handling, but they are separate changes.", "right": "pull_request:44071"}, {"accept": false, "left": "pull_request:44017", "reason": "SegFormer and GPT-J/CodeGen are unrelated models, so this is not the same bug or PR.", "right": "pull_request:44722"}, {"accept": false, "left": "pull_request:44017", "reason": "SegFormer and LongT5 are separate model refactors; they share a pattern but not a concrete duplicate change.", "right": "pull_request:44161"}, {"accept": false, "left": "pull_request:44017", "reason": "SegFormer and VisualBert touch different architectures and code paths, so they should not be merged as duplicates.", "right": "pull_request:44073"}, {"accept": false, "left": "pull_request:44068", "reason": "Both are output tracing refactors, but GPT-Neo and MPT are different model implementations with separate edits.", "right": "pull_request:44071"}, {"accept": false, "left": "pull_request:44071", "reason": "MPT and MGP-STR are unrelated model files and changes; similarity is only at the refactor level.", "right": "pull_request:44086"}, {"accept": false, "left": "pull_request:44017", "reason": "SegFormer and MPT are different model code paths, so this is not a duplicate change.", "right": "pull_request:44071"}, {"accept": false, "left": "pull_request:44059", "reason": "GPT-2 and MPT are distinct model refactors; no shared concrete bug or fix.", "right": "pull_request:44071"}, {"accept": false, "left": "pull_request:43997", "reason": "RegNet and VisualBert are different architectures; the refactor pattern is similar but the changes are not the same.", "right": "pull_request:44073"}, {"accept": false, "left": "pull_request:44073", "reason": "VisualBert and 
Flaubert/XLM are separate model implementations, so they are not duplicates.", "right": "pull_request:44101"}, {"accept": false, "left": "pull_request:44059", "reason": "GPT-2 and GPT-Neo are different model implementations; same standardized-output theme, but not one change.", "right": "pull_request:44068"}, {"accept": false, "left": "pull_request:44056", "reason": "MPNet and Flaubert/XLM are different code paths and different refactors.", "right": "pull_request:44101"}, {"accept": false, "left": "pull_request:44073", "reason": "VisualBert and LongT5 are unrelated model-specific refactors, not the same underlying fix.", "right": "pull_request:44161"}, {"accept": false, "left": "pull_request:44101", "reason": "Flaubert/XLM and LongT5 are separate model changes; similarity is only the output-tracing migration.", "right": "pull_request:44161"}, {"accept": false, "left": "pull_request:44017", "reason": "SegFormer and GPT-Neo are different model refactors, so they should not be deduplicated.", "right": "pull_request:44018"}, {"accept": false, "left": "pull_request:43997", "reason": "RegNet and MPNet are unrelated model implementations with separate output-tracing changes.", "right": "pull_request:44056"}, {"accept": false, "left": "pull_request:44071", "reason": "MPT and ImageGPT are different models; these are separate standardized-output refactors.", "right": "pull_request:44076"}, {"accept": false, "left": "pull_request:44074", "reason": "TextNet and Flaubert/XLM are different model code paths and therefore not duplicates.", "right": "pull_request:44101"}], "summary": "This cluster is a set of similar but model-specific refactors to standardized output tracing/capture_outputs, not duplicates of one underlying bug or change. 
The PRs touch different model implementations and should stay separate."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44068|pull_request:44086", "pull_request:44059|pull_request:44086", "pull_request:44059|pull_request:44722", "pull_request:44085|pull_request:44086", "pull_request:44098|pull_request:44101", "pull_request:44018|pull_request:44071", "pull_request:44017|pull_request:44722", "pull_request:44017|pull_request:44161", "pull_request:44017|pull_request:44073", "pull_request:44068|pull_request:44071", "pull_request:44071|pull_request:44086", "pull_request:44017|pull_request:44071", "pull_request:44059|pull_request:44071", "pull_request:43997|pull_request:44073", "pull_request:44073|pull_request:44101", "pull_request:44059|pull_request:44068", "pull_request:44018|pull_request:44059", "pull_request:44056|pull_request:44101", "pull_request:44073|pull_request:44161", "pull_request:44101|pull_request:44161", "pull_request:44017|pull_request:44018", "pull_request:43997|pull_request:44056", "pull_request:44071|pull_request:44076", "pull_request:44074|pull_request:44101"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 20438, "estimated_input_tokens": 10091, "item_count": 17, "node_count": 17, "serialized_chars": 40364, "soft_pair_count": 23}, "cached_at": "2026-04-14T21:40:08Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "3de935753b8f57416f7db944f0d92773c5156ad0d0e2c985750b71087be17ecf", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43996", "pull_request:43997", "pull_request:44007", "pull_request:44010", "pull_request:44017", "pull_request:44018", 
"pull_request:44044", "pull_request:44056", "pull_request:44066", "pull_request:44071", "pull_request:44073", "pull_request:44074", "pull_request:44085", "pull_request:44129", "pull_request:44154", "pull_request:44161", "pull_request:44722"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 44066 is the best cluster representative because it most clearly captures the shared GPT-J/CodeGen change and is better scoped than the broader, model-mixed refactor PRs.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 44066 is the clearest representative: it is focused on the GPT-J/CodeGen standardized output-tracing refactor, has the explicit shared issue target #43979, and its file set matches the concrete code path being changed.", "confidence": 0.79, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44056", "reason": "Both are output-tracing refactors, but they affect different models and code paths (MPNet vs LongT5), so they are not the same concrete change.", "right": "pull_request:44161"}, {"accept": false, "left": "pull_request:43997", "reason": "RegNet and GPT-Neo are unrelated model implementations; same refactor theme and shared issue target are not enough to make this a duplicate.", "right": "pull_request:44018"}, {"accept": false, "left": "pull_request:43997", "reason": "These target different models and different forward paths, so they are separate refactors rather than one mergeable change.", "right": "pull_request:44161"}, {"accept": false, "left": "pull_request:44074", "reason": "TextNet and LongT5 touch different architectures and execution paths; they are not the same underlying bug or change.", "right": "pull_request:44161"}, {"accept": false, "left": "pull_request:43997", "reason": "RegNet vs MPT are distinct model code paths, so this is only a thematic similarity, not a duplicate.", "right": "pull_request:44071"}, {"accept": false, "left": "pull_request:44010", "reason": "SqueezeBERT and MPT are 
different models; both use output-tracing refactors, but not the same concrete fix.", "right": "pull_request:44071"}, {"accept": false, "left": "pull_request:44010", "reason": "These are separate model-specific refactors in unrelated code paths (SqueezeBERT vs VisualBERT).", "right": "pull_request:44073"}, {"accept": false, "left": "pull_request:44018", "reason": "GPT-Neo and SpeechT5 are unrelated implementations; the overlap is only in the refactor pattern.", "right": "pull_request:44129"}, {"accept": false, "left": "pull_request:43997", "reason": "RegNet and TextNet are different model families and do not appear to address the same code-path problem.", "right": "pull_request:44074"}, {"accept": false, "left": "pull_request:43997", "reason": "RegNet and SpeechT5 are unrelated models, so these are not duplicate fixes.", "right": "pull_request:44129"}, {"accept": false, "left": "pull_request:44010", "reason": "SqueezeBERT and GPT-Neo are distinct model implementations; same refactor theme is insufficient to merge them.", "right": "pull_request:44018"}, {"accept": false, "left": "pull_request:44071", "reason": "MPT and SpeechT5 are different model code paths, so this is not the same change.", "right": "pull_request:44129"}, {"accept": false, "left": "pull_request:43996", "reason": "CVT/FNet and LongT5 are separate model refactors with no shared concrete code-path fix.", "right": "pull_request:44161"}, {"accept": false, "left": "pull_request:43996", "reason": "CVT/FNet and SegFormer are different model families; the shared output-tracing theme is too broad to treat as a duplicate.", "right": "pull_request:44017"}, {"accept": false, "left": "pull_request:44010", "reason": "SqueezeBERT and DeBERTa-v2 are different model code paths; these do not look like the same underlying bug or change.", "right": "pull_request:44044"}, {"accept": false, "left": "pull_request:44044", "reason": "DeBERTa-v2 and TextNet are unrelated model implementations, so they are not duplicates.", 
"right": "pull_request:44074"}, {"accept": true, "left": "pull_request:44066", "reason": "Both PRs touch the same GPT-J/CodeGen files, target the same issue #43979, and implement the same standardized output-tracing refactor.", "right": "pull_request:44722"}, {"accept": true, "left": "pull_request:44085", "reason": "Despite the odd title, both PRs are centered on GPT-J output tracing in the same file and are aiming at the same standardized decorator-based refactor.", "right": "pull_request:44722"}, {"accept": false, "left": "pull_request:44085", "reason": "GPT-J and SpeechT5 are different models, so they are not the same concrete fix.", "right": "pull_request:44129"}, {"accept": false, "left": "pull_request:43996", "reason": "These are separate model refactors (CVT/FNet vs ResNet/RT-DETR ResNet), not one underlying code-path change.", "right": "pull_request:44007"}, {"accept": false, "left": "pull_request:43996", "reason": "CVT/FNet and SpeechT5 are unrelated implementations, so this is not a duplicate pair.", "right": "pull_request:44129"}, {"accept": false, "left": "pull_request:44044", "reason": "DeBERTa-v2 and SpeechT5 are unrelated model paths; only the refactor pattern overlaps.", "right": "pull_request:44129"}, {"accept": false, "left": "pull_request:44044", "reason": "DeBERTa-v2 and Vits are distinct models and do not appear to fix the same bug or change.", "right": "pull_request:44154"}], "summary": "This cluster is mostly a set of model-specific output-tracing refactors under issue #43979, but most are separate per-model changes rather than true duplicates. 
The only clear duplicate-like overlap is the GPT-J/CodeGen refactor pair."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44056|pull_request:44161", "pull_request:43997|pull_request:44018", "pull_request:43997|pull_request:44161", "pull_request:44074|pull_request:44161", "pull_request:43997|pull_request:44071", "pull_request:44010|pull_request:44071", "pull_request:44010|pull_request:44073", "pull_request:44018|pull_request:44129", "pull_request:43997|pull_request:44074", "pull_request:43997|pull_request:44129", "pull_request:44010|pull_request:44018", "pull_request:44071|pull_request:44129", "pull_request:43996|pull_request:44161", "pull_request:43996|pull_request:44017", "pull_request:44010|pull_request:44044", "pull_request:44044|pull_request:44074", "pull_request:44066|pull_request:44722", "pull_request:44085|pull_request:44722", "pull_request:44085|pull_request:44129", "pull_request:43996|pull_request:44007", "pull_request:43996|pull_request:44129", "pull_request:44044|pull_request:44129", "pull_request:44044|pull_request:44154"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 8818, "estimated_input_tokens": 4281, "item_count": 8, "node_count": 8, "serialized_chars": 17124, "soft_pair_count": 6}, "cached_at": "2026-04-14T21:40:25Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "988d264ba6c24724a9800e84e4845da09674600fe6610c86b5058d0265f51b18", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43997", "pull_request:44010", "pull_request:44024", "pull_request:44056", "pull_request:44073", "pull_request:44074", "pull_request:44076", "pull_request:44129"], 
"result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 44129 is the most complete and well-scoped example of the shared output-capture migration pattern, with an explicit tracking issue and clearer refactor intent.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 44129 is the strongest representative because it has an explicit issue target, a clearly scoped model-specific refactor, and more complete implementation context than the others.", "confidence": 0.92, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44024", "reason": "Both are standardized output-tracing refactors, but they touch different models (FocalNet vs ImageGPT) and different forward paths; not the same concrete change.", "right": "pull_request:44076"}, {"accept": false, "left": "pull_request:44073", "reason": "Both migrate to capture_outputs/can_return_tuple, but VisualBert and TextNet are separate model implementations and not mergeable as one PR.", "right": "pull_request:44074"}, {"accept": false, "left": "pull_request:44056", "reason": "These are similar refactors in different architectures (MPNet vs VisualBert); they share a pattern, not a single underlying bug or code path.", "right": "pull_request:44073"}, {"accept": false, "left": "pull_request:44056", "reason": "MPNet and TextNet are unrelated model-specific output-tracing changes; the overlap is only in the refactor style.", "right": "pull_request:44074"}, {"accept": false, "left": "pull_request:43997", "reason": "RegNet and SqueezeBert are distinct model code paths, so this is only thematic similarity rather than the same change.", "right": "pull_request:44010"}, {"accept": false, "left": "pull_request:44010", "reason": "SqueezeBert and SpeechT5 both adjust output-capture plumbing, but they are separate model-specific implementations and not a single duplicate fix.", "right": "pull_request:44129"}], "summary": "These PRs are all model-specific output-tracing refactors, but they affect 
different architectures and code paths, so they should not be collapsed into one duplicate cluster. The similarity is mostly in the shared migration pattern, not in a single underlying bug or change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44024|pull_request:44076", "pull_request:44073|pull_request:44074", "pull_request:44056|pull_request:44073", "pull_request:44056|pull_request:44074", "pull_request:43997|pull_request:44010", "pull_request:44010|pull_request:44129"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 16312, "estimated_input_tokens": 8028, "item_count": 13, "node_count": 13, "serialized_chars": 32109, "soft_pair_count": 24}, "cached_at": "2026-04-14T21:41:09Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "78fc6564b492e96fcba193a31efb30f0d95314aa1c81c5982e0280d59cb67575", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43998", "pull_request:43999", "pull_request:44000", "pull_request:44001", "pull_request:44002", "pull_request:44003", "pull_request:44004", "pull_request:44025", "pull_request:44026", "pull_request:44027", "pull_request:44028", "pull_request:44029", "pull_request:44030"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 44003 is the strongest single representative of the cluster because it makes the most substantive tracing changes and spans the closest pair of related models here.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 44003 is the broadest and most representative refactor in the set, touching two closely related Mamba-family models and the new output-capture plumbing.", 
"confidence": 0.94, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:43998", "reason": "Same refactor theme, but timm_backbone and mamba are unrelated model paths; not the same change.", "right": "pull_request:44003"}, {"accept": false, "left": "pull_request:44001", "reason": "Both are output-tracing refactors, but univnet and mamba are different architectures with different forward paths.", "right": "pull_request:44003"}, {"accept": false, "left": "pull_request:44002", "reason": "upernet segmentation code and mamba stateful LM code are unrelated; only the refactor style matches.", "right": "pull_request:44003"}, {"accept": false, "left": "pull_request:44000", "reason": "vision_text_dual_encoder and mamba are different subsystems; this is similarity in naming, not the same bug/change.", "right": "pull_request:44003"}, {"accept": false, "left": "pull_request:44003", "reason": "mamba/falcon_mamba tracing changes are not the same as superpoint feature extraction changes.", "right": "pull_request:44028"}, {"accept": false, "left": "pull_request:44003", "reason": "codegen and mamba modify different forward implementations; no shared concrete code-path issue.", "right": "pull_request:44004"}, {"accept": false, "left": "pull_request:44000", "reason": "Both are refactors, but they affect unrelated models and do not look mergeable as one PR.", "right": "pull_request:44004"}, {"accept": false, "left": "pull_request:43999", "reason": "mobilenet_v1 and univnet are unrelated model families; same tracing pattern only.", "right": "pull_request:44001"}, {"accept": false, "left": "pull_request:43999", "reason": "mobilenet_v1 backbone/classifier code is not the same underlying change as upernet segmentation output handling.", "right": "pull_request:44002"}, {"accept": false, "left": "pull_request:43999", "reason": "mobilenet_v1 and depth_anything are separate model implementations; no duplicate code-path problem.", "right": "pull_request:44025"}, {"accept": false, 
"left": "pull_request:43999", "reason": "mobilenet_v1 and vision_encoder_decoder target different model stacks; only the refactor motif overlaps.", "right": "pull_request:44026"}, {"accept": false, "left": "pull_request:43999", "reason": "mobilenet_v1 and speech_encoder_decoder are different architectures and not the same fix.", "right": "pull_request:44027"}, {"accept": false, "left": "pull_request:43998", "reason": "timm_backbone and upernet are different model components; the shared wording does not imply duplicate work.", "right": "pull_request:44002"}, {"accept": false, "left": "pull_request:43998", "reason": "timm_backbone and vision_text_dual_encoder touch unrelated forward logic.", "right": "pull_request:44000"}, {"accept": false, "left": "pull_request:44026", "reason": "vision_encoder_decoder wrapper code and rwkv decoder internals are unrelated changes.", "right": "pull_request:44029"}, {"accept": false, "left": "pull_request:43998", "reason": "timm_backbone and univnet do not share a concrete bug or code path.", "right": "pull_request:44001"}, {"accept": false, "left": "pull_request:44029", "reason": "rwkv and DPR are distinct models; similar output-tracing cleanup is not enough to merge.", "right": "pull_request:44030"}, {"accept": false, "left": "pull_request:44025", "reason": "depth_anything and vision_encoder_decoder are different model families; not the same underlying refactor.", "right": "pull_request:44026"}, {"accept": false, "left": "pull_request:44025", "reason": "depth_anything and speech_encoder_decoder have different forward contracts and code paths.", "right": "pull_request:44027"}, {"accept": false, "left": "pull_request:44025", "reason": "depth_anything and superpoint are unrelated vision models; no duplicate fix.", "right": "pull_request:44028"}, {"accept": false, "left": "pull_request:44026", "reason": "vision_encoder_decoder and speech_encoder_decoder are separate wrappers with different internals.", "right": "pull_request:44027"}, 
{"accept": false, "left": "pull_request:44026", "reason": "vision_encoder_decoder and superpoint do not appear to address the same underlying issue.", "right": "pull_request:44028"}, {"accept": false, "left": "pull_request:44027", "reason": "speech_encoder_decoder and superpoint are unrelated; same refactor pattern only.", "right": "pull_request:44028"}, {"accept": false, "left": "pull_request:44001", "reason": "univnet and upernet are different model families, so these are not duplicates.", "right": "pull_request:44002"}], "summary": "These PRs all implement a similar output-tracing refactor pattern, but they target different model architectures and code paths, so they are not duplicates of the same underlying change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:43998|pull_request:44003", "pull_request:44001|pull_request:44003", "pull_request:44002|pull_request:44003", "pull_request:44000|pull_request:44003", "pull_request:44003|pull_request:44028", "pull_request:44003|pull_request:44004", "pull_request:44000|pull_request:44004", "pull_request:43999|pull_request:44001", "pull_request:43999|pull_request:44002", "pull_request:43999|pull_request:44025", "pull_request:43999|pull_request:44026", "pull_request:43999|pull_request:44027", "pull_request:43998|pull_request:44002", "pull_request:43998|pull_request:44000", "pull_request:44026|pull_request:44029", "pull_request:43998|pull_request:44001", "pull_request:44029|pull_request:44030", "pull_request:44025|pull_request:44026", "pull_request:44025|pull_request:44027", "pull_request:44025|pull_request:44028", "pull_request:44026|pull_request:44027", "pull_request:44026|pull_request:44028", "pull_request:44027|pull_request:44028", "pull_request:44001|pull_request:44002"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 13578, "estimated_input_tokens": 6661, "item_count": 
11, "node_count": 11, "serialized_chars": 26642, "soft_pair_count": 19}, "cached_at": "2026-04-14T21:41:34Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "92e1d535679e2b51f763118af18a69a2b45127150f618cc787794f02dfa006d7", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:43998", "pull_request:44000", "pull_request:44001", "pull_request:44002", "pull_request:44004", "pull_request:44025", "pull_request:44026", "pull_request:44027", "pull_request:44028", "pull_request:44029", "pull_request:44030"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #44002 is the best single exemplar of the cluster\u2019s common refactor style, but it is not a duplicate of the other PRs. The rest are separate model-specific changes (different files and forward signatures), so there is no global merge candidate here.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #44002 is the cleanest, most representative instance of the pattern: a direct `can_return_tuple`/`auto_docstring` refactor in one model, with the usual removal of explicit output-control args. 
It is a better canonical representative than the more complex multi-file or more specialized PRs.", "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44000", "reason": "Same broad refactor label, but different models (`vision_text_dual_encoder` vs `univnet`) and different forward APIs; not the same underlying bug/change.", "right": "pull_request:44001"}, {"accept": false, "left": "pull_request:44000", "reason": "Different model subsystems and different signatures; both add output-tracing helpers, but they are independent PRs, not one fix.", "right": "pull_request:44002"}, {"accept": false, "left": "pull_request:43998", "reason": "`timm_backbone` refactor vs `rwkv` refactor; unrelated model code paths and different implementation details.", "right": "pull_request:44029"}, {"accept": false, "left": "pull_request:44002", "reason": "Both are output-tracing refactors, but for different architectures (`upernet` vs `rwkv`) with distinct forward behavior and changes.", "right": "pull_request:44029"}, {"accept": false, "left": "pull_request:43998", "reason": "`timm_backbone` helper cleanup vs `codegen` cache/output-signature changes; not the same concrete change.", "right": "pull_request:44004"}, {"accept": false, "left": "pull_request:44001", "reason": "Different models and different code paths; the shared theme is too broad to treat as a duplicate.", "right": "pull_request:44004"}, {"accept": false, "left": "pull_request:44002", "reason": "`upernet` segmentation forward refactor and `codegen` generation/cache refactor are distinct changes, not mergeable as one fix.", "right": "pull_request:44004"}, {"accept": false, "left": "pull_request:44025", "reason": "`depth_anything`/`prompt_depth_anything` vs `rwkv`; separate model families and separate edits.", "right": "pull_request:44029"}, {"accept": false, "left": "pull_request:44025", "reason": "Both are refactors around output handling, but they touch different models and code paths 
(`depth_anything` vs `dpr`).", "right": "pull_request:44030"}, {"accept": false, "left": "pull_request:44026", "reason": "`vision_encoder_decoder` wrapper changes are not the same underlying change as `DPR` encoder pooling/output handling.", "right": "pull_request:44030"}, {"accept": false, "left": "pull_request:44027", "reason": "`speech_encoder_decoder` and `rwkv` are unrelated implementations; the common helper usage is not enough for duplication.", "right": "pull_request:44029"}, {"accept": false, "left": "pull_request:44027", "reason": "These affect different model wrappers and different forward signatures; not the same concrete fix.", "right": "pull_request:44030"}, {"accept": false, "left": "pull_request:44028", "reason": "`superpoint` and `rwkv` have different forward logic and separate output-capturing rewrites.", "right": "pull_request:44029"}, {"accept": false, "left": "pull_request:44028", "reason": "Different model code paths (`superpoint` vs `dpr`), so they are not duplicates.", "right": "pull_request:44030"}, {"accept": false, "left": "pull_request:44001", "reason": "Different architectures and different files; the similarity is only the generic output-tracing refactor pattern.", "right": "pull_request:44025"}, {"accept": false, "left": "pull_request:44001", "reason": "`vision_text_dual_encoder` and `vision_encoder_decoder` are separate wrapper models; same theme, not same change.", "right": "pull_request:44026"}, {"accept": false, "left": "pull_request:44002", "reason": "Separate model families and separate forward modifications; not one underlying bug or change.", "right": "pull_request:44025"}, {"accept": false, "left": "pull_request:44000", "reason": "`vision_text_dual_encoder` vs `depth_anything` are unrelated model-specific refactors.", "right": "pull_request:44025"}, {"accept": false, "left": "pull_request:44000", "reason": "Different wrapper models and different API adjustments; too broad to merge as duplicate.", "right": 
"pull_request:44026"}], "summary": "These PRs all follow the same broad refactor theme (\u201coutput tracing\u201d), but they target different model implementations and different code paths. They are not duplicates of one another and would not plausibly be merged into a single PR without becoming an unrelated multi-model sweep."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44000|pull_request:44001", "pull_request:44000|pull_request:44002", "pull_request:43998|pull_request:44029", "pull_request:44002|pull_request:44029", "pull_request:43998|pull_request:44004", "pull_request:44001|pull_request:44004", "pull_request:44002|pull_request:44004", "pull_request:44025|pull_request:44029", "pull_request:44025|pull_request:44030", "pull_request:44026|pull_request:44030", "pull_request:44027|pull_request:44029", "pull_request:44027|pull_request:44030", "pull_request:44028|pull_request:44029", "pull_request:44028|pull_request:44030", "pull_request:44001|pull_request:44025", "pull_request:44001|pull_request:44026", "pull_request:44002|pull_request:44025", "pull_request:44000|pull_request:44025", "pull_request:44000|pull_request:44026"], "split": true, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 5792, "estimated_input_tokens": 2768, "item_count": 3, "node_count": 3, "serialized_chars": 11072, "soft_pair_count": 3}, "cached_at": "2026-04-14T21:41:46Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "cf3d9cd398ed5e933efc3b6da8287ed7e77abcfc393ba9007243c357623b1ca0", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44662", "pull_request:45075", "pull_request:45401"], "result": 
{"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 44662 is the strongest representative of this cluster: it is the most developed/reviewed implementation and includes the broadest set of files/tests, but it still addresses a distinct model from the others.", "canonical_issue_reason": null, "canonical_pr_reason": "No single PR is a clear canonical duplicate target because each implements a different model family (PenguinVL, DeepSeek-OCR-2, Voxtral TTS).", "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44662", "reason": "Different model families and code paths: PenguinVL vs Voxtral TTS. Shared auto-registration/docs files are just framework boilerplate, not evidence of the same change.", "right": "pull_request:45401"}, {"accept": false, "left": "pull_request:44662", "reason": "Different model families and implementations: PenguinVL vs DeepSeek-OCR-2. Overlap is limited to common Transformers scaffolding, so these should not be merged as duplicates.", "right": "pull_request:45075"}, {"accept": false, "left": "pull_request:45075", "reason": "DeepSeek-OCR-2 and Voxtral TTS are unrelated model additions with separate architectures and targets; shared repo integration files are not enough to treat them as the same PR.", "right": "pull_request:45401"}], "summary": "All three pull requests are separate model-addition efforts with only shared boilerplate in Transformers registration/docs. 
They are not duplicates of the same underlying bug or change."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44662|pull_request:45401", "pull_request:44662|pull_request:45075", "pull_request:45075|pull_request:45401"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2460, "estimated_input_tokens": 1102, "item_count": 2, "node_count": 2, "serialized_chars": 4407, "soft_pair_count": 1}, "cached_at": "2026-04-14T21:42:12Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "be59c07ba21b70572a8dfd36153554116d1a2bbd6feea888008cf887b07f0359", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:44979", "pull_request:45363"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "#45363 is the more concrete, user-facing fusion feature because it wires fusion into model loading/execution, whereas #44979 is lower-level API scaffolding.", "canonical_issue_reason": null, "canonical_pr_reason": "No single canonical PR: #44979 is a standalone module-fusion API addition, while #45363 is a separate KernelConfig-based integration path with different files and behavior.", "confidence": 0.96, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:44979", "reason": "Different underlying changes: #44979 adds a new module_fusion API, while #45363 implements n-to-1 kernel fusion through KernelConfig and integration hooks. 
They do not appear mergeable as one PR.", "right": "pull_request:45363"}], "summary": "These two PRs are related by the broad theme of fusion, but they implement different things: one introduces a general Module Fusion API scaffold, while the other adds n-to-1 kernel fusion integration via KernelConfig. They are not duplicates."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:44979|pull_request:45363"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2464, "estimated_input_tokens": 1104, "item_count": 2, "node_count": 2, "serialized_chars": 4414, "soft_pair_count": 1}, "cached_at": "2026-04-14T21:42:20Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "2720e1d7ad812495e5ad99c11aaf92079832746a03e0b87b97fd6d136436b422", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:45086", "pull_request:45317"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 45317 is the best PR candidate because it contains the same code fix plus validation coverage, indicating a more finished and merge-ready change.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 45317 is the better canonical PR: it fixes the same concrete bug as 45086 but is more complete, with an added test and clearer title.", "confidence": 0.98, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:45086", "reason": "Same underlying bug and same code-path fix in `src/transformers/tokenization_utils_tokenizers.py`; the second PR is essentially the first fix plus tests, so they are duplicates for triage.", "right": "pull_request:45317"}], "summary": "Both pull requests 
address the same Mistral regex patch AttributeError by switching from `tokenizer.backend_tokenizer.pre_tokenizer` to `tokenizer.pre_tokenizer`; the later PR also adds test coverage, making it the stronger canonical representative."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:45086|pull_request:45317"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2950, "estimated_input_tokens": 1347, "item_count": 2, "node_count": 2, "serialized_chars": 5385, "soft_pair_count": 1}, "cached_at": "2026-04-14T21:42:31Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "85bc9145ca0d48c162af7d1baec86c3cb92e1b036a01192d8738bed9bb2e772f", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:45332", "pull_request:45333"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #45332 is the strongest cluster anchor: it is the broader concrete implementation and most likely the main integration point for the feature.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #45332 is the best canonical representative because it covers the end-to-end heterogeneous model support path, including cache, masking, modeling, and tests, rather than only the config layer.", "confidence": 0.84, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:45332", "reason": "They share heterogeneous-config infrastructure, but #45333 is a config-focused PR while #45332 is a broader model/runtime PR. 
They are related feature work, not the same underlying code change or bug fix, and would not plausibly collapse into one PR without losing separation of concerns.", "right": "pull_request:45333"}], "summary": "Two open PRs in the same heterogeneous-config feature area, but they are not duplicates: one is config-only, the other is a broader modeling/cache/masking implementation that likely builds on it."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:45332|pull_request:45333"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 1774, "estimated_input_tokens": 759, "item_count": 2, "node_count": 2, "serialized_chars": 3034, "soft_pair_count": 1}, "cached_at": "2026-04-14T21:42:46Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "d81d80e8bf31d6fdb0098909a80e75e153cb72d5c356c3dfc2d39f19b32665ba", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:45351", "pull_request:45427"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR #45351 is the best representative because it directly addresses the failure path without introducing a pre-import reference to `torch` in the condition.", "canonical_issue_reason": null, "canonical_pr_reason": "PR #45351 is the cleaner, safer fix: it imports `torch` first and then returns early when `torch.cuda.is_available()` is false, avoiding the capability call entirely.", "confidence": 0.97, "soft_edge_verdicts": [{"accept": true, "left": "pull_request:45351", "reason": "Both PRs fix the same concrete bug in `src/transformers/testing_utils.py`: guarding CUDA capability access when CUDA is unavailable. 
The code paths and target issue match closely enough that they could be merged into one PR.", "right": "pull_request:45427"}], "summary": "Two PRs target the same bug in `testing_utils.get_device_properties()`\u2014calling CUDA capability APIs when CUDA is unavailable. They are effectively duplicate fixes against the same file and same tracking issue."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:45351|pull_request:45427"], "split": false, "trimmed": false} +{"aggressively_trimmed": false, "budget": {"estimated_eval_tokens": 2904, "estimated_input_tokens": 1324, "item_count": 2, "node_count": 2, "serialized_chars": 5296, "soft_pair_count": 1}, "cached_at": "2026-04-14T21:43:21Z", "key": {"analyst_prompt_version": "1.0", "cache_schema_version": "1.0", "evaluator_prompt_version": "1.0", "hybrid_review_settings_fingerprint": "0f43626e1e29c17d95909c055aefdee163006b55198caa80750bd160f31a6d79", "model": "gpt-5.4-mini?service_tier=flex", "prepared_review_unit_hash": "d07ac36c4f75972060a8d43d10a04490cf0f0bd8efa701c17699376ac65bb3b6", "prepared_review_unit_schema_version": "1.0"}, "nodes": ["pull_request:45415", "pull_request:45425"], "result": {"analyst_result": {"best_issue_reason": null, "best_pr_reason": "PR 45415 is the best overall representative because it is the more substantial and central typing cleanup; 45425 is narrower and more tooling/config-oriented.", "canonical_issue_reason": null, "canonical_pr_reason": "PR 45415 is the broader, more representative type-checking change across many Transformers modules and the pyproject typing rules, so it best captures the cluster\u2019s main theme.", "confidence": 0.88, "soft_edge_verdicts": [{"accept": false, "left": "pull_request:45415", "reason": "Both are typing-related, but they do not appear to fix the same concrete code-path problem. 
45415 is a broad multi-file type-checking pass, while 45425 is a narrower `ty`/`modeling_utils` typing adjustment; they would not naturally merge into one PR.", "right": "pull_request:45425"}], "summary": "Both pull requests are about typing/type-checking work in Transformers, but they address different scopes: one broadly adds type checking across many `src/transformers/*` files, while the other is a narrower typing/`ty`-configuration change centered on `modeling_utils` and related tooling/docs."}, "error_kind": null, "error_message": null, "evaluator_result": null, "evaluator_used": false, "retried": false}, "soft_pairs": ["pull_request:45415|pull_request:45425"], "split": false, "trimmed": false}