diff --git "a/analysis-report.llm-reviews.json" "b/analysis-report.llm-reviews.json" new file mode 100644--- /dev/null +++ "b/analysis-report.llm-reviews.json" @@ -0,0 +1,22468 @@ +{ + "schema_version": "1.0", + "repo": "huggingface/transformers", + "snapshot_id": "20260414T200457Z", + "generated_at": "2026-04-14T21:43:21Z", + "model": "gpt-5.4-mini?service_tier=flex", + "reviews": [ + { + "label": "issue", + "component_index": 1, + "component_count": 12, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-13244-3", + "nodes": [ + "issue:13244", + "issue:15980", + "issue:16618" + ], + "soft_pairs": [ + "issue:13244|issue:16618", + "issue:15980|issue:16618" + ], + "prepared_review_unit_hash": "8de19dda1341c388e2af84bd6b0c83c22e7cc0b4f3a2b26a4db2a5f7d7b6a612", + "component_budget": { + "node_count": 3, + "item_count": 3, + "soft_pair_count": 2, + "serialized_chars": 2477, + "estimated_input_tokens": 620, + "estimated_eval_tokens": 1496 + }, + "budget": { + "node_count": 3, + "item_count": 3, + "soft_pair_count": 2, + "serialized_chars": 2477, + "estimated_input_tokens": 620, + "estimated_eval_tokens": 1496 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "These three issues are not duplicates: one is about TAPAS tokenization mismatch, one about authentication/error messaging when downloading a private model, and one about failing to load bert-base-uncased. 
They share only broad model-loading/library context.", + "confidence": 0.96, + "canonical_issue_reason": "Issue #16618 is the most concrete user-facing model-loading report, but it is not a true duplicate of the others; it is only the closest representative of the cluster by topic breadth.", + "canonical_pr_reason": null, + "best_issue_reason": "#16618 is the best single issue to keep as the cluster representative because it is a direct failure-to-load report, but it should not subsume the other two as duplicates.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:13244", + "right": "issue:16618", + "accept": false, + "reason": "Different bugs: TAPAS tokenization parity with TensorFlow code versus inability to load bert-base-uncased. No shared code path or concrete fix overlap is evident." + }, + { + "left": "issue:15980", + "right": "issue:16618", + "accept": false, + "reason": "Different failure modes: private-model download/authentication error message versus loading a public model. Related to model loading broadly, but not the same underlying bug." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 1, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:30277", + "issue:34634", + "issue:35545", + "issue:36010", + "issue:41720", + "issue:42915", + "issue:43596", + "issue:43638", + "issue:43716", + "issue:43828", + "issue:44292", + "issue:44509", + "issue:44512", + "issue:44661", + "issue:44805", + "issue:44918", + "issue:45161", + "issue:45237" + ], + "soft_pairs": [ + "issue:43716|issue:45237", + "issue:43828|issue:45237", + "issue:44292|issue:45237", + "issue:30277|issue:35545", + "issue:35545|issue:42915", + "issue:35545|issue:41720", + "issue:44509|issue:44512", + "issue:43638|issue:44805", + "issue:35545|issue:36010", + "issue:43596|issue:44805", + "issue:44805|issue:45161", + "issue:44805|issue:44918", + "issue:44661|issue:44805", + "issue:30277|issue:43638", + "issue:34634|issue:35545", + "issue:43596|issue:43638" + ], + "prepared_review_unit_hash": "6f39acfd230557b8a448c89376608a8b1e6dc0c48ed8dd76df67f1fc0725fcc6", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 16, + "serialized_chars": 15130, + "estimated_input_tokens": 3783, + "estimated_eval_tokens": 7822 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "Mostly unrelated issue set; the only clear duplicate pair is the two BertModel + DeepSpeed 
ZeRO-3 index-out-of-bounds reports.", + "confidence": 0.91, + "canonical_issue_reason": "issue:43596 is the clearest, most specific description of the shared BertModel + ZeRO-3 index-out-of-bounds bug; issue:43638 appears to be the same underlying report with slightly different wording.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:43596 is the best representative of the only accepted duplicate pair and has the most precise title for the underlying bug.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43716", + "right": "issue:45237", + "accept": false, + "reason": "Different models and different failure modes: image-preprocessor dtype mismatch vs AMD GPU runtime failure." + }, + { + "left": "issue:43828", + "right": "issue:45237", + "accept": false, + "reason": "Both mention dtype issues, but they describe different model/hardware contexts and do not look like the same bug." + }, + { + "left": "issue:44292", + "right": "issue:45237", + "accept": false, + "reason": "Different model and likely different backend/runtime problem; not the same concrete bug." + }, + { + "left": "issue:30277", + "right": "issue:35545", + "accept": false, + "reason": "Unrelated failures: distributed collective mismatch during ZeRO-3 vs ONNX export error." + }, + { + "left": "issue:35545", + "right": "issue:42915", + "accept": false, + "reason": "Different models and different code paths: ONNX export vs FineGrainedFP8Config runtime failure." + }, + { + "left": "issue:35545", + "right": "issue:41720", + "accept": false, + "reason": "ModernBERT ONNX export error and Qwen3 device-mapping CUDA assert are unrelated." + }, + { + "left": "issue:44509", + "right": "issue:44512", + "accept": false, + "reason": "Both are docs regressions, but they concern different removed commands and do not appear to be the same change." 
+ }, + { + "left": "issue:43638", + "right": "issue:44805", + "accept": false, + "reason": "Both are IndexError reports, but the BertModel ZeRO-3 init issue and the mask-shape mismatch are different failures." + }, + { + "left": "issue:35545", + "right": "issue:36010", + "accept": false, + "reason": "ONNX export failure and GenerationMixin import failure are unrelated." + }, + { + "left": "issue:43596", + "right": "issue:44805", + "accept": false, + "reason": "Different error messages and likely different code paths; not the same underlying bug." + }, + { + "left": "issue:44805", + "right": "issue:45161", + "accept": false, + "reason": "Different model families and failure modes; no clear shared bug." + }, + { + "left": "issue:44805", + "right": "issue:44918", + "accept": false, + "reason": "Mask-shape IndexError and Qwen3.5 embedding unpacking failure are different issues." + }, + { + "left": "issue:44661", + "right": "issue:44805", + "accept": false, + "reason": "Tokenizer mapping failure while adding a model is unrelated to the mask/tensor IndexError." + }, + { + "left": "issue:30277", + "right": "issue:43638", + "accept": false, + "reason": "Collective mismatch during distributed training is not the same as the BertModel ZeRO-3 index error." + }, + { + "left": "issue:34634", + "right": "issue:35545", + "accept": false, + "reason": "BarkProcessor voice_preset bug and ModernBERT export error are unrelated." + }, + { + "left": "issue:43596", + "right": "issue:43638", + "accept": true, + "reason": "Same BertModel + DeepSpeed ZeRO-3 index-out-of-bounds failure, described with only minor wording differences." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 2, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:24643", + "issue:30277", + "issue:34928", + "issue:36331", + "issue:39290", + "issue:43039", + "issue:43425", + "issue:43596", + "issue:43638", + "issue:43828", + "issue:43975", + "issue:44292", + "issue:44457", + "issue:44805", + "issue:45081", + "issue:45161", + "issue:45237" + ], + "soft_pairs": [ + "issue:43975|issue:44457", + "issue:43039|issue:45161", + "issue:44292|issue:44457", + "issue:34928|issue:44805", + "issue:24643|issue:30277", + "issue:43975|issue:45237", + "issue:43638|issue:45161", + "issue:34928|issue:36331", + "issue:43596|issue:45161", + "issue:39290|issue:45081", + "issue:43828|issue:45161", + "issue:43425|issue:44292" + ], + "prepared_review_unit_hash": "2abf7274e9b185445a9106749fd14476c822dd52d8b737d46e71333ab8df5e3d", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 12, + "serialized_chars": 13690, + "estimated_input_tokens": 3423, + "estimated_eval_tokens": 7102 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly a grab-bag of unrelated training/runtime issues across different models and backends. 
The only clearly near-duplicate subcluster is the DeepSpeed ZeRO-3 + BERT IndexError pair (43596/43638); the rest should stay separate.", + "confidence": 0.67, + "canonical_issue_reason": "Issue 43596 is the best representative of the only strong duplicate-like subcluster: it states the general DeepSpeed ZeRO-3 + BERT IndexError clearly, while 43638 is a narrower variant of the same report.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 43596 is the strongest global issue choice because it is the cleanest, broadest formulation of the only closely related bug in the set and is more canonical than the more specific 43638.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43975", + "right": "issue:44457", + "accept": false, + "reason": "Different bugs: one is incorrect detokenization for a specific model, the other is LoRA merge/save/reload output drift." + }, + { + "left": "issue:43039", + "right": "issue:45161", + "accept": false, + "reason": "Both mention model/runtime behavior, but the failures are unrelated: Liger cross-entropy dispatch vs GPT-OSS tensor-parallel support." + }, + { + "left": "issue:44292", + "right": "issue:44457", + "accept": false, + "reason": "Qwen-3 NVFP4 runtime failure and LoRA merge/reload mismatch are different code paths and symptoms." + }, + { + "left": "issue:34928", + "right": "issue:44805", + "accept": false, + "reason": "Both are shape-related errors, but one is FSDP + activation checkpointing recomputation mismatch and the other is a mask/indexing shape error; not the same underlying bug." + }, + { + "left": "issue:24643", + "right": "issue:30277", + "accept": false, + "reason": "Both involve DeepSpeed, but the concrete failures differ: 2-D weight requirement vs collective mismatch on ranks." + }, + { + "left": "issue:43975", + "right": "issue:45237", + "accept": false, + "reason": "Detokenization bug for DeepSeek coder is unrelated to GPT-OSS-20B failing on AMD GPUs." 
+ }, + { + "left": "issue:43638", + "right": "issue:45161", + "accept": false, + "reason": "Different issues: DeepSpeed ZeRO-3/BERT IndexError vs GPT-OSS MoE tensor-parallel not working." + }, + { + "left": "issue:34928", + "right": "issue:36331", + "accept": false, + "reason": "Activation checkpointing/FSDP tensor-size mismatch is unrelated to the Trainer compute_loss signature TypeError." + }, + { + "left": "issue:43596", + "right": "issue:45161", + "accept": false, + "reason": "DeepSpeed ZeRO-3/BERT init IndexError and GPT-OSS MoE TP failure are unrelated." + }, + { + "left": "issue:39290", + "right": "issue:45081", + "accept": false, + "reason": "Both are tokenizer/config-related crashes, but one is a Gemma3 vLLM config attribute error and the other is a Mistral regex patch backend_tokenizer attribute error." + }, + { + "left": "issue:43828", + "right": "issue:45161", + "accept": false, + "reason": "Autocast dtype mismatch for Phi-tiny-MoE is not the same bug as GPT-OSS MoE tensor-parallel failure." + }, + { + "left": "issue:43425", + "right": "issue:44292", + "accept": false, + "reason": "Torch version incompatibility is too broad and does not match the specific Qwen-3 NVFP4 runtime failure." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 3, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:30277", + "issue:34928", + "issue:36010", + "issue:36331", + "issue:41720", + "issue:41762", + "issue:43039", + "issue:43541", + "issue:43638", + "issue:43716", + "issue:43827", + "issue:43828", + "issue:44292", + "issue:44512", + "issue:44560", + "issue:44805", + "issue:44918", + "issue:45161" + ], + "soft_pairs": [ + "issue:43541|issue:43716", + "issue:44918|issue:45161", + "issue:34928|issue:41720", + "issue:43828|issue:44292", + "issue:43716|issue:44292", + "issue:44292|issue:44918", + "issue:43039|issue:44918", + "issue:41762|issue:43638", + "issue:43827|issue:44512", + "issue:43716|issue:44560", + "issue:43716|issue:44918", + "issue:43039|issue:43828", + "issue:30277|issue:36331", + "issue:43039|issue:44805", + "issue:34928|issue:36010" + ], + "prepared_review_unit_hash": "c3f424d68a8ed25c30a371ba3ddc07aed224186b77b105edea82a8d06a7a5b89", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 15, + "serialized_chars": 15044, + "estimated_input_tokens": 3761, + "estimated_eval_tokens": 7778 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is heterogeneous and looks like a set of unrelated issues that were scored as superficially similar by text 
overlap. The soft pairs mostly share broad keywords (dtype mismatch, ZeRO-3, Qwen, docs, etc.) but not the same concrete failure path, model, or fix target.", + "confidence": 0.96, + "canonical_issue_reason": "No clear canonical issue: the items span unrelated bugs and docs regressions across different models, parallelism modes, and subsystems.", + "canonical_pr_reason": null, + "best_issue_reason": "No single issue is a good representative for deduping this cluster because the overlap is mostly superficial.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43541", + "right": "issue:43716", + "accept": false, + "reason": "Different models and failures: torch dynamo grouped_mm tracing error vs image preprocessor/model dtype mismatch." + }, + { + "left": "issue:44918", + "right": "issue:45161", + "accept": false, + "reason": "Both mention Qwen3/embedding-related behavior, but one is an SFT trainer unpacking issue and the other is a TP-only MoE problem." + }, + { + "left": "issue:34928", + "right": "issue:41720", + "accept": false, + "reason": "Different root causes and symptoms: activation checkpointing/FSDP tensor recomputation vs Qwen3 auto device map cuda assert." + }, + { + "left": "issue:43828", + "right": "issue:44292", + "accept": false, + "reason": "One is an autocast dtype mismatch on Phi-tiny-MoE, the other is a Qwen-3 NVFP4 runtime error; not the same bug." + }, + { + "left": "issue:43716", + "right": "issue:44292", + "accept": false, + "reason": "Both involve model execution errors, but the models and failure modes differ substantially (dtype mismatch vs NVFP4 runtime error)." + }, + { + "left": "issue:44292", + "right": "issue:44918", + "accept": false, + "reason": "Different code paths: NVFP4 model execution error vs TRL SFT embedding unpacking issue." 
+ }, + { + "left": "issue:43039", + "right": "issue:44918", + "accept": false, + "reason": "One is about Liger Kernel cross_entropy dispatch; the other is TRL SFT input embedding unpacking." + }, + { + "left": "issue:41762", + "right": "issue:43638", + "accept": false, + "reason": "Both hit ZeRO-3 loading, but the concrete failures differ: Gemma3 loading index error vs non-pretrained Bert training index error." + }, + { + "left": "issue:43827", + "right": "issue:44512", + "accept": false, + "reason": "Same general docs/v5 theme, but different removed command references and likely different documentation locations." + }, + { + "left": "issue:43716", + "right": "issue:44560", + "accept": false, + "reason": "Different issues: image preprocessor/model dtype mismatch vs Qwen3-vl video StopIteration." + }, + { + "left": "issue:43716", + "right": "issue:44918", + "accept": false, + "reason": "Dtype mismatch on a vision-language model is not the same as Qwen3.5 embedding unpacking with TRL SFT." + }, + { + "left": "issue:43039", + "right": "issue:43828", + "accept": false, + "reason": "Liger Kernel cross_entropy routing and autocast dtype mismatch are related only at a high level, not the same concrete bug." + }, + { + "left": "issue:30277", + "right": "issue:36331", + "accept": false, + "reason": "Both involve training-time runtime errors, but one is collective mismatch with ZeRO-3 and the other is a Trainer API signature break." + }, + { + "left": "issue:43039", + "right": "issue:44805", + "accept": false, + "reason": "Cross-entropy dispatch with Liger Kernel is unrelated to a mask/index shape mismatch error." + }, + { + "left": "issue:34928", + "right": "issue:36010", + "accept": false, + "reason": "Activation checkpointing/FSDP recomputation issue is unrelated to the GenerationMixin import error." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 4, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:39290", + "issue:41093", + "issue:41720", + "issue:41762", + "issue:43039", + "issue:43404", + "issue:43425", + "issue:43531", + "issue:43596", + "issue:43638", + "issue:43792", + "issue:43854", + "issue:43866", + "issue:43901", + "issue:44512", + "issue:44560", + "issue:44863", + "issue:45070" + ], + "soft_pairs": [ + "issue:41762|issue:43596", + "issue:43901|issue:44512", + "issue:39290|issue:41720", + "issue:43039|issue:44560", + "issue:43866|issue:44863", + "issue:43854|issue:43866", + "issue:43425|issue:45070", + "issue:39290|issue:43531", + "issue:41093|issue:43638", + "issue:43792|issue:45070", + "issue:43404|issue:43866" + ], + "prepared_review_unit_hash": "bc69598399edeb0d855bea951a075d66542aad04196100a8f766cd766eb709ca", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 11, + "serialized_chars": 13732, + "estimated_input_tokens": 3433, + "estimated_eval_tokens": 7122 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The cluster is highly heterogeneous: it mixes unrelated model-loading bugs, documentation issues, version-compatibility reports, and a few superficially similar IndexError/sliding_window tickets. 
None of the soft pairs look like safe duplicate merges.", + "confidence": 0.78, + "canonical_issue_reason": "If forced to pick one issue as the cluster anchor, #45070 is the broadest and most general regression report (core PretrainedConfig/pydantic breakage) and is more representative than the many model-specific tickets. This is still a weak canonical choice because the cluster is not truly cohesive.", + "canonical_pr_reason": null, + "best_issue_reason": "#45070 is the best issue to keep as the representative ticket because it is a current, core-framework regression rather than a narrow model-specific or docs-only report. It is the least specific and most likely to subsume nearby config-validation regressions.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:41762", + "right": "issue:43596", + "accept": false, + "reason": "Both are IndexError reports under DeepSpeed ZeRO-3, but they involve different models and different code paths (Gemma3 loading vs BertModel init). Too weak to treat as the same bug." + }, + { + "left": "issue:43901", + "right": "issue:44512", + "accept": false, + "reason": "Both are documentation-related, but they concern different docs and different behavior changes. Not the same underlying issue." + }, + { + "left": "issue:39290", + "right": "issue:41720", + "accept": false, + "reason": "Different model families and failures: Gemma3 config attribute error vs Qwen3 auto device mapping cudaErrorAssert. No concrete shared bug." + }, + { + "left": "issue:43039", + "right": "issue:44560", + "accept": false, + "reason": "Liger Kernel cross_entropy routing is unrelated to a Qwen3-vl-embedding video StopIteration failure. Different subsystems and failure modes." + }, + { + "left": "issue:43866", + "right": "issue:44863", + "accept": false, + "reason": "Both are loading-related, but one reports a corrupted Ovis2 checkpoint and the other a NemotronH implementation/checkpoint loading problem. Not the same issue." 
+ }, + { + "left": "issue:43854", + "right": "issue:43866", + "accept": false, + "reason": "Different models and different root causes: GLM-4.7-Flash test loading failure vs Ovis2 checkpoint corruption." + }, + { + "left": "issue:43425", + "right": "issue:45070", + "accept": false, + "reason": "Torch 2.10 incompatibility is a version/support issue, while #45070 is a PretrainedConfig pydantic regression. Distinct bugs." + }, + { + "left": "issue:39290", + "right": "issue:43531", + "accept": false, + "reason": "Both mention sliding_window, but one is a missing config field in Gemma3 and the other is a Qwen3-MoE sliding_window behavior issue. Not enough evidence of one shared bug." + }, + { + "left": "issue:41093", + "right": "issue:43638", + "accept": false, + "reason": "Both are IndexErrors, but the shapes/modes differ and the second is specifically ZeRO-3 with BertModel. Too generic to merge." + }, + { + "left": "issue:43792", + "right": "issue:45070", + "accept": false, + "reason": "Whisper loading failure and PretrainedConfig/pydantic breakage are unrelated." + }, + { + "left": "issue:43404", + "right": "issue:43866", + "accept": false, + "reason": "Mistral3 lm_head tying bug is unrelated to an Ovis2 checkpoint corruption report." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 5, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:36010", + "issue:39290", + "issue:41093", + "issue:41762", + "issue:43296", + "issue:43366", + "issue:43531", + "issue:43541", + "issue:43572", + "issue:43828", + "issue:44291", + "issue:44387", + "issue:44589", + "issue:44841", + "issue:45005", + "issue:45070", + "issue:45084", + "issue:45161" + ], + "soft_pairs": [ + "issue:43541|issue:43828", + "issue:36010|issue:39290", + "issue:44841|issue:45084", + "issue:41093|issue:41762", + "issue:43296|issue:43366", + "issue:43572|issue:45070", + "issue:44387|issue:45005", + "issue:43541|issue:45161", + "issue:44291|issue:45070", + "issue:43531|issue:45070", + "issue:43541|issue:44589" + ], + "prepared_review_unit_hash": "4a75bbb90be924183a00452c341945b56ead68c0eed03dbf819324fc0306cdc3", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 11, + "serialized_chars": 13725, + "estimated_input_tokens": 3432, + "estimated_eval_tokens": 7120 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is heterogeneous: most items are unrelated bug reports spanning config/schema regressions, model-specific loading failures, runtime shape errors, and quantization/autocast issues. 
The soft-similarity pairs only share broad surface language, not the same concrete bug or fix path, so none should be merged as duplicates.", + "confidence": 0.93, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43541", + "right": "issue:43828", + "accept": false, + "reason": "Both involve MoE/runtime errors, but one is a grouped_mm tracing failure in MixtralForCausalLM and the other is an autocast dtype mismatch in Phi-tiny-MoE; different models and code paths." + }, + { + "left": "issue:36010", + "right": "issue:39290", + "accept": false, + "reason": "These are distinct regressions: one is an import/export failure for GenerationMixin, the other is a Gemma3TextConfig attribute missing during vLLM loading." + }, + { + "left": "issue:44841", + "right": "issue:45084", + "accept": false, + "reason": "Voxtral processor failure and a generic 'Can't compile non template nodes' error are unrelated symptoms with no shared code path or fix target." + }, + { + "left": "issue:41093", + "right": "issue:41762", + "accept": false, + "reason": "Both are IndexError reports, but one is a mask/tensor length mismatch and the other is an empty-tensor index error when loading Gemma3 with DeepSpeed ZeRO-3; not the same bug." + }, + { + "left": "issue:43296", + "right": "issue:43366", + "accept": false, + "reason": "PaddleOCR-VL loading in vLLM is a concrete model-integration issue, while gpt-oss GGUF support is a feature request/compatibility topic; not duplicates." + }, + { + "left": "issue:43572", + "right": "issue:45070", + "accept": false, + "reason": "StableLmConfig missing pad_token_idx after 5.0 and a PretrainedConfig pydantic-field regression are both config issues, but they affect different fields and failure modes." 
+ }, + { + "left": "issue:44387", + "right": "issue:45005", + "accept": false, + "reason": "Int4 quantization OOM from CUDA reserved memory is unrelated to tied-weights behavior in translation models; different subsystems and fixes." + }, + { + "left": "issue:43541", + "right": "issue:45161", + "accept": false, + "reason": "Both mention MoE/GPT-OSS-ish model loading concerns, but one is a torch dynamo grouped_mm tracing runtime error and the other is tensor-parallelism not working; not the same underlying defect." + }, + { + "left": "issue:44291", + "right": "issue:45070", + "accept": false, + "reason": "The init_empty_weights TypeError from an unexpected _is_hf_initialized argument is a constructor/signature compatibility bug, not the same as the pydantic PretrainedConfig field regression." + }, + { + "left": "issue:43531", + "right": "issue:45070", + "accept": false, + "reason": "A sliding_window bug in Qwen3-MoE models is unrelated to a PretrainedConfig pydantic model-field breakage; they do not share the same code path." + }, + { + "left": "issue:43541", + "right": "issue:44589", + "accept": false, + "reason": "Grouped_mm tracing in MixtralForCausalLM and missing Float8_e4m3fnStorage are separate runtime/backend issues with different triggers and fixes." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 6, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:39186", + "issue:39290", + "issue:40990", + "issue:41093", + "issue:42915", + "issue:43054", + "issue:43404", + "issue:43541", + "issue:43596", + "issue:43645", + "issue:43742", + "issue:43994", + "issue:44530", + "issue:44863", + "issue:44918", + "issue:44960", + "issue:45005", + "issue:45399" + ], + "soft_pairs": [ + "issue:39290|issue:43742", + "issue:43054|issue:43994", + "issue:41093|issue:43596", + "issue:42915|issue:43541", + "issue:40990|issue:44960", + "issue:39186|issue:44918", + "issue:43404|issue:45005", + "issue:43645|issue:45399", + "issue:44530|issue:45005", + "issue:44863|issue:45005" + ], + "prepared_review_unit_hash": "5b04d8441024d02b1d87df35a681bf3d1be76ef96f8169b209025cbecd35fa6e", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13391, + "estimated_input_tokens": 3348, + "estimated_eval_tokens": 6952 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly a loose collection of unrelated model/runtime bugs. 
Under a strict duplicate standard, none of the soft pairs are clear same-code-path duplicates; the SigLIP2 pair is the closest but still too vague to merge confidently.", + "confidence": 0.76, + "canonical_issue_reason": "issue:45005 is the broadest open report here and the most umbrella-like regression theme ('tied weights in v5'), so it is the best representative of the set even though most other items are unrelated.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:45005 best fits as the cluster representative because it is general, current, and describes a reusable failure mode rather than a one-off model-specific symptom.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:39290", + "right": "issue:43742", + "accept": false, + "reason": "Different failure modes and model families: Gemma3/vLLM attribute error vs MobileLLM load key error. Similarity is only superficial." + }, + { + "left": "issue:43054", + "right": "issue:43994", + "accept": false, + "reason": "Both mention SigLIP2, but one is about poor text embeddings and the other about nonsensical AutoModel/pipeline output. Not enough evidence they are the same concrete bug." + }, + { + "left": "issue:41093", + "right": "issue:43596", + "accept": false, + "reason": "Both are tensor/indexing-style errors, but the shapes, stack context, and setup differ; no clear shared underlying code path." + }, + { + "left": "issue:42915", + "right": "issue:43541", + "accept": false, + "reason": "Different models and runtime contexts: Qwen3Moe FP8 loading vs Mixtral grouped_mm failure during Dynamo tracing." + }, + { + "left": "issue:40990", + "right": "issue:44960", + "accept": false, + "reason": "Completely different reported problems and model families; only broad 'model quality' similarity." 
+ }, + { + "left": "issue:39186", + "right": "issue:44918", + "accept": false, + "reason": "FSDP weight-shape runtime error vs TRL SFT embedding unpacking failure are distinct symptoms and likely distinct fixes." + }, + { + "left": "issue:43404", + "right": "issue:45005", + "accept": false, + "reason": "Both involve weights, but one is a specific Mistral3 lm_head tying bug and the other is a broad v5 translation-model tied-weights issue; not clearly the same code path." + }, + { + "left": "issue:43645", + "right": "issue:45399", + "accept": false, + "reason": "Custom-model Jupyter initialization breakage is unrelated to the flash-attn fallback gating problem." + }, + { + "left": "issue:44530", + "right": "issue:45005", + "accept": false, + "reason": "PagedAttentionCache linear_attention crash and tied-weights translation-model issues are unrelated subsystems and failure modes." + }, + { + "left": "issue:44863", + "right": "issue:45005", + "accept": false, + "reason": "NemotronH checkpoint loading is a model implementation issue, not the same as v5 tied-weights behavior in translation models." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 7, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:39692", + "issue:41553", + "issue:42915", + "issue:43054", + "issue:43644", + "issue:43828", + "issue:43873", + "issue:43883", + "issue:43950", + "issue:43975", + "issue:44292", + "issue:44360", + "issue:44492", + "issue:44512", + "issue:44534", + "issue:44928", + "issue:45005", + "issue:45399" + ], + "soft_pairs": [ + "issue:43873|issue:45005", + "issue:42915|issue:45399", + "issue:44360|issue:44512", + "issue:43975|issue:44292", + "issue:44492|issue:44512", + "issue:43950|issue:44534", + "issue:43644|issue:43950", + "issue:43828|issue:44928", + "issue:39692|issue:43054", + "issue:41553|issue:43883" + ], + "prepared_review_unit_hash": "fbd53343bdf9a432ff8dc41272859ad3e6b2fecc35e7ad1b4f63ed9f9bf92911", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13240, + "estimated_input_tokens": 3310, + "estimated_eval_tokens": 6876 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "Mostly a heterogeneous set of unrelated issues, with one clear duplicate subgroup around transformers v5 corrupting non-persistent buffers. 
I would merge 43644 and 44534 into 43950; the other soft pairs look semantically similar only at the subsystem/topic level, not the same concrete bug.", + "confidence": 0.84, + "canonical_issue_reason": "Issue 43950 is the clearest and most specific report of the non-persistent buffer corruption regression, with the strongest framing of the underlying bug and impact.", + "canonical_pr_reason": null, + "best_issue_reason": "43950 is the best representative issue because it precisely states the regression and likely serves as the cleanest canonical target for the duplicate buffer-corruption reports.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43873", + "right": "issue:45005", + "accept": false, + "reason": "Both touch quantization/tied-weights behavior, but they describe different failures: offloading with quantization vs translation models with tied weights. Not the same bug." + }, + { + "left": "issue:42915", + "right": "issue:45399", + "accept": false, + "reason": "Qwen3Moe FP8 failure and flash-attn fallback blocking are unrelated code paths and symptoms." + }, + { + "left": "issue:44360", + "right": "issue:44512", + "accept": false, + "reason": "A DSA indexer ReLU discussion and a docs typo about a removed command are not the same underlying issue." + }, + { + "left": "issue:43975", + "right": "issue:44292", + "accept": false, + "reason": "Incorrect detokenization for one model and an error running a different quantized Qwen model are distinct bugs." + }, + { + "left": "issue:44492", + "right": "issue:44512", + "accept": false, + "reason": "A documentation typo and outdated docs mentioning a removed command are separate documentation issues, not duplicates." + }, + { + "left": "issue:43950", + "right": "issue:44534", + "accept": true, + "reason": "Both report the same transformers v5 regression where non-persistent buffers are mishandled/corrupted during loading." 
+ }, + { + "left": "issue:43644", + "right": "issue:43950", + "accept": true, + "reason": "Same underlying bug: v5 fills or corrupts non-persistent buffers registered with persistent=False." + }, + { + "left": "issue:43828", + "right": "issue:44928", + "accept": false, + "reason": "Different model/runtime failures: autocast dtype mismatch versus RLHF NaN explosion from 3D position_ids and SDPA fallback." + }, + { + "left": "issue:39692", + "right": "issue:43054", + "accept": false, + "reason": "SigLIP2 doc example errors and worse text embeddings are both SigLIP2-related, but they are different problems." + }, + { + "left": "issue:41553", + "right": "issue:43883", + "accept": false, + "reason": "Bad AutoTokenizer error handling for Voxtral and a missing attribute on MolmoForCausalLM are unrelated." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 8, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:36331", + "issue:38175", + "issue:41553", + "issue:41762", + "issue:42947", + "issue:43054", + "issue:43493", + "issue:43643", + "issue:43856", + "issue:43866", + "issue:43872", + "issue:43881", + "issue:43883", + "issue:44387", + "issue:44403", + "issue:44451", + "issue:44589", + "issue:45020" + ], + "soft_pairs": [ + "issue:43866|issue:44403", + "issue:36331|issue:41762", + "issue:43872|issue:44589", + "issue:41553|issue:43881", + "issue:38175|issue:43054", + "issue:42947|issue:44387", + "issue:43881|issue:43883", + "issue:43054|issue:43493", + "issue:44403|issue:44451", + "issue:42947|issue:43856", + "issue:43643|issue:45020" + ], + "prepared_review_unit_hash": 
"f1252b40796bc4f0c7c3ef908828d34931a77c7e33ac3a5c45e5f3ee3dfa15a3", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 11, + "serialized_chars": 13621, + "estimated_input_tokens": 3406, + "estimated_eval_tokens": 7068 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly heterogeneous: the reported issues span unrelated models, loading failures, training/memory problems, and dependency incompatibilities. Only the SigLIP2 pair looks like a plausible duplicate; the rest should stay separate.", + "confidence": 0.87, + "canonical_issue_reason": "issue:43493 is the best representative of the only likely duplicate pair because it states the underlying SigLIP2 HF-vs-JAX implementation discrepancy more directly; most other items are unrelated and should not be merged into a single canonical bug.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:43493 has the clearest root-cause framing and is the strongest candidate for the SigLIP2-related problem in this set.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43866", + "right": "issue:44403", + "accept": false, + "reason": "Different problems: corrupted checkpoint vs noisy load logging." + }, + { + "left": "issue:36331", + "right": "issue:41762", + "accept": false, + "reason": "Trainer API TypeError in compute_loss is unrelated to Gemma3 ZeRO-3 loading IndexError." + }, + { + "left": "issue:43872", + "right": "issue:44589", + "accept": false, + "reason": "Both involve dependency/quantization incompatibilities, but the concrete failures differ (_is_hf_initialized kwarg vs missing Float8 storage)." 
+ }, + { + "left": "issue:41553", + "right": "issue:43881", + "accept": false, + "reason": "Different models and failure modes: bad AutoTokenizer error message for Voxtral vs generic loading failure for glm-4v-9b." + }, + { + "left": "issue:38175", + "right": "issue:43054", + "accept": false, + "reason": "Both concern SigLIP2 behavior, but one reports zero probabilities and the other degraded embeddings; not clearly the same bug." + }, + { + "left": "issue:42947", + "right": "issue:44387", + "accept": false, + "reason": "Both are memory-related, but LoRA gradient checkpointing and int4 reserved-memory OOM are distinct code paths." + }, + { + "left": "issue:43881", + "right": "issue:43883", + "accept": false, + "reason": "Different model-load failures with different underlying errors; not the same bug." + }, + { + "left": "issue:43054", + "right": "issue:43493", + "accept": true, + "reason": "Likely the same SigLIP2 implementation discrepancy: HF text embeddings/performance diverge from the original JAX implementation." + }, + { + "left": "issue:44403", + "right": "issue:44451", + "accept": false, + "reason": "Unnecessary loading noise is not the same as a model that cannot be loaded." + }, + { + "left": "issue:42947", + "right": "issue:43856", + "accept": false, + "reason": "Both are training/memory complaints, but they involve different workloads and mechanisms." + }, + { + "left": "issue:43643", + "right": "issue:45020", + "accept": false, + "reason": "Remote-code regressions are broad; missing fields in AutoConfig is too specific to merge with the generic recent-version breakage report without stronger evidence." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 9, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:41762", + "issue:42491", + "issue:42915", + "issue:43054", + "issue:43278", + "issue:43782", + "issue:43824", + "issue:43872", + "issue:43975", + "issue:44368", + "issue:44403", + "issue:44451", + "issue:44488", + "issue:44661", + "issue:44960", + "issue:45020", + "issue:45356" + ], + "soft_pairs": [ + "issue:44403|issue:44488", + "issue:43782|issue:44960", + "issue:42491|issue:44368", + "issue:41762|issue:42915", + "issue:43975|issue:44451", + "issue:45020|issue:45356", + "issue:44661|issue:45020", + "issue:43824|issue:43872", + "issue:43975|issue:44488", + "issue:43054|issue:43278" + ], + "prepared_review_unit_hash": "33a882d5cfb9d8fb62b09dccc3bf40fc448cc0b5a4fde435ca4f00760e6213bc", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 10, + "serialized_chars": 12875, + "estimated_input_tokens": 3219, + "estimated_eval_tokens": 6694 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The cluster is loosely related by model-loading/model-compatibility regressions, but the items span many distinct bugs: Qwen/Gemma/DeepSeek/VL loading, tokenizer regressions, dtype/quantization issues, and embedding-quality complaints. 
None of the soft pairs look like true duplicates.", + "confidence": 0.91, + "canonical_issue_reason": "issue:45020 is the broadest representative of the shared theme: recent transformers changes breaking model loading/remote-code paths. The other issues are mostly narrower, model-specific failures or unrelated tokenizer/embedding/quantization problems.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:45020 is the best single issue to anchor the cluster because it captures the widest compatibility/regression surface among the set. It is still not a true umbrella for many of the other reports, but it is the closest fit.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44403", + "right": "issue:44488", + "accept": false, + "reason": "Both involve loading-related behavior, but one is about noisy warnings while the other is a specific model load failure for sleng-bert; not the same bug." + }, + { + "left": "issue:43782", + "right": "issue:44960", + "accept": false, + "reason": "Different models and different failure modes: Qwen3VL weight_only loading vs GLM5, with no clear shared code-path bug." + }, + { + "left": "issue:42491", + "right": "issue:44368", + "accept": false, + "reason": "Both mention Qwen/LoRA, but one is a cross-version model usability problem and the other is a training-time warning about tie_word_embeddings; not duplicate enough." + }, + { + "left": "issue:41762", + "right": "issue:42915", + "accept": false, + "reason": "Separate model-specific failures in different execution modes: Gemma3 ZeRO-3 loading vs Qwen3Moe with FineGrainedFP8Config." + }, + { + "left": "issue:43975", + "right": "issue:44451", + "accept": false, + "reason": "Tokenizer detokenization corruption and model load failure are unrelated bugs." 
+ }, + { + "left": "issue:45020", + "right": "issue:45356", + "accept": false, + "reason": "Both are regressions, but one is about remote_code model loading and the other is tokenizer codec/warning behavior for Kimi-K2.5." + }, + { + "left": "issue:44661", + "right": "issue:45020", + "accept": false, + "reason": "Both touch model registration/loading internals, but the reported failures are different concrete bugs and would not plausibly merge into one PR." + }, + { + "left": "issue:43824", + "right": "issue:43872", + "accept": false, + "reason": "ImportError for a missing Qwen VL class and a bitsandbytes Int8Params constructor mismatch are distinct incompatibility issues." + }, + { + "left": "issue:43975", + "right": "issue:44488", + "accept": false, + "reason": "DeepSeek detokenization and sleng-bert loading are unrelated." + }, + { + "left": "issue:43054", + "right": "issue:43278", + "accept": false, + "reason": "One is a SigLIP2 text-embedding quality complaint; the other is an embedding dtype mismatch between train and eval. Related area, but not the same bug." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 10, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:41553", + "issue:42915", + "issue:43257", + "issue:43404", + "issue:43742", + "issue:43792", + "issue:43827", + "issue:43866", + "issue:43872", + "issue:43950", + "issue:44220", + "issue:44291", + "issue:44509", + "issue:44991", + "issue:45020", + "issue:45081", + "issue:45399" + ], + "soft_pairs": [ + "issue:44991|issue:45399", + "issue:43792|issue:44220", + "issue:43827|issue:44509", + "issue:45081|issue:45399", + "issue:43404|issue:43950", + "issue:42915|issue:45020", + "issue:43257|issue:43866", + "issue:41553|issue:44991", + "issue:43742|issue:45020", + "issue:43742|issue:45399", + "issue:43872|issue:44291" + ], + "prepared_review_unit_hash": "24e09a81d37ee0d1820cea9ced4721e62a9c256bef9b7cb78b1291798691dd03", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 11, + "serialized_chars": 13251, + "estimated_input_tokens": 3313, + "estimated_eval_tokens": 6882 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly a mix of unrelated Transformers bug reports. 
I found two genuine duplicate-like pairs: the v5 docs issues about removed pipeline tasks, and the `_is_hf_initialized` loading regression seen via both bitsandbytes and init_empty_weights. The rest are only loosely similar by subsystem (tokenizers, audio, MoE, remote_code) and should stay separate.", + "confidence": 0.81, + "canonical_issue_reason": "Issue 44291 is the clearest and most general statement of the shared `_is_hf_initialized` regression; 43872 looks like a downstream bitsandbytes manifestation of the same loading-path bug.", + "canonical_pr_reason": null, + "best_issue_reason": "44291 is the strongest representative issue in the cluster because it names the concrete failing code path and root cause most directly.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44991", + "right": "issue:45399", + "accept": false, + "reason": "Both are recent Transformers regressions, but one is tokenizer loading for a specific model and the other is flash-attn fallback gating; different code paths and fixes." + }, + { + "left": "issue:43792", + "right": "issue:44220", + "accept": false, + "reason": "Both are audio/speech-related, but the titles suggest a broad model failure versus a specific `_torch_extract_fbank_features()` bug; not enough to treat as the same underlying defect." + }, + { + "left": "issue:43827", + "right": "issue:44509", + "accept": true, + "reason": "Both report the same docs regression: summarization/translation pipeline task names still mentioned after v5 removed those pipelines." + }, + { + "left": "issue:45081", + "right": "issue:45399", + "accept": false, + "reason": "Mistral tokenizer regex crash and flash-attn fallback blocking are unrelated failures in different parts of the loading stack." + }, + { + "left": "issue:43404", + "right": "issue:43950", + "accept": false, + "reason": "One is a tied-weights bug in a specific multimodal model class; the other is a generic from_pretrained buffer regression. 
Different symptoms and likely different fixes." + }, + { + "left": "issue:42915", + "right": "issue:45020", + "accept": false, + "reason": "Qwen3MoE FP8 failure and remote_code breakage are both broad loading issues, but they are not the same model/pathology." + }, + { + "left": "issue:43257", + "right": "issue:43866", + "accept": false, + "reason": "These concern different models and different failure modes: weight conversion under accelerate+deepspeed versus a corrupted checkpoint." + }, + { + "left": "issue:41553", + "right": "issue:44991", + "accept": false, + "reason": "Both are tokenizer-related, but they target different models and different failure messages; no evidence they share the same bug." + }, + { + "left": "issue:43742", + "right": "issue:45020", + "accept": false, + "reason": "A KeyError for MobileLLM-125M is not the same concrete issue as the broader remote_code regression." + }, + { + "left": "issue:43872", + "right": "issue:44291", + "accept": true, + "reason": "Both mention the same unexpected `_is_hf_initialized` argument during model loading; 43872 frames it through bitsandbytes Int8Params, while 44291 shows the same underlying regression via init_empty_weights." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 11, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:36331", + "issue:41093", + "issue:41553", + "issue:42915", + "issue:43257", + "issue:43854", + "issue:43872", + "issue:43940", + "issue:44479", + "issue:44488", + "issue:44530", + "issue:44560", + "issue:44805", + "issue:44863", + "issue:45084", + "issue:45305", + "issue:45313" + ], + "soft_pairs": [ + "issue:43940|issue:45313", + "issue:42915|issue:43872", + "issue:41553|issue:44488", + "issue:43854|issue:44863", + "issue:36331|issue:41093", + "issue:43257|issue:45305", + "issue:41093|issue:44805", + "issue:44479|issue:44560", + "issue:44530|issue:44863", + "issue:43872|issue:45084" + ], + "prepared_review_unit_hash": "4cbe5f71964cb597b72460a010d2c8809323538b918225cc605c5157b991bcaf", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 10, + "serialized_chars": 12907, + "estimated_input_tokens": 3227, + "estimated_eval_tokens": 6710 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is heterogeneous: the items span unrelated bugs in trainer APIs, tensor shape mismatches, model/tokenizer loading, DeepSpeed/quantization, and video handling. 
I would not merge any of the soft pairs as duplicates.", + "confidence": 0.96, + "canonical_issue_reason": "No clear canonical issue: the items describe different underlying bugs rather than one shared defect.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:45313 is the closest representative only because it is a concrete, current Qwen/DeepSpeed loading report, but it does not subsume the rest of the cluster.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43940", + "right": "issue:45313", + "accept": false, + "reason": "Both involve Qwen + DeepSpeed ZeRO-3 loading, but the model families and failure modes differ ('all params MISSING' vs 'language_model' load failure). Too broad to treat as the same bug." + }, + { + "left": "issue:42915", + "right": "issue:43872", + "accept": false, + "reason": "Different quantization paths and errors: FineGrainedFP8Config failure vs bitsandbytes Int8Params kwarg incompatibility. Not the same underlying issue." + }, + { + "left": "issue:41553", + "right": "issue:44488", + "accept": false, + "reason": "Both are loading-related, but one is a bad AutoTokenizer error for Voxtral and the other is a failure to load cjvt/sleng-bert. Different models and fix scope." + }, + { + "left": "issue:43854", + "right": "issue:44863", + "accept": false, + "reason": "Both concern model loading, but they are for different architectures/checkpoints (GLM-4.7-Flash vs NemotronH) and likely distinct code paths." + }, + { + "left": "issue:36331", + "right": "issue:41093", + "accept": false, + "reason": "A custom trainer kwarg regression and a tensor mask shape mismatch are unrelated bugs with different symptoms and code paths." + }, + { + "left": "issue:43257", + "right": "issue:45305", + "accept": false, + "reason": "Both touch DeepSpeed/Qwen training, but one is about weight conversion during load and the other about gradient averaging with GAS. Not the same concrete bug." 
+ }, + { + "left": "issue:41093", + "right": "issue:44805", + "accept": false, + "reason": "Both are shape-mismatch IndexErrors, but there is no evidence they arise from the same code path or same fix." + }, + { + "left": "issue:44479", + "right": "issue:44560", + "accept": false, + "reason": "Both are Qwen video regressions in 5.3.0, but one is a broad regression across several models and the other is a StopIteration in qwen3-vl-embedding. Too specific to merge." + }, + { + "left": "issue:44530", + "right": "issue:44863", + "accept": false, + "reason": "PagedAttentionCache/linear_attention crash and NemotronH checkpoint loading are unrelated issues." + }, + { + "left": "issue:43872", + "right": "issue:45084", + "accept": false, + "reason": "bitsandbytes Int8Params constructor incompatibility and 'Can't compile non template nodes' are different failures with no shared underlying bug." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 12, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:24643", + "issue:30064", + "issue:42617", + "issue:42915", + "issue:43366", + "issue:43531", + "issue:43550", + "issue:43645", + "issue:43646", + "issue:43824", + "issue:43828", + "issue:43950", + "issue:43957", + "issue:44560", + "issue:44589", + "issue:44910", + "issue:44918", + "issue:45325" + ], + "soft_pairs": [ + "issue:43366|issue:43828", + "issue:24643|issue:30064", + "issue:43366|issue:43531", + "issue:44560|issue:44918", + "issue:43366|issue:44589", + "issue:43950|issue:45325", + "issue:43645|issue:43646", + "issue:43550|issue:44910", + "issue:43824|issue:43957", + 
"issue:42915|issue:43957", + "issue:42617|issue:43366" + ], + "prepared_review_unit_hash": "aa9dc8924d21c81ff6dfb02a2298814fb5f9a7aecf8f7de7d55a2db71a5475d4", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 11, + "serialized_chars": 13676, + "estimated_input_tokens": 3419, + "estimated_eval_tokens": 7094 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "Mostly unrelated issue reports across different models/features; only the two Transformers 5.0 custom model initialization reports appear to be the same underlying regression. All other soft pairs are false positives and should stay separate.", + "confidence": 0.88, + "canonical_issue_reason": "Issue 43645 is the stronger representative of the only true duplicate pair: it describes the same custom-model initialization regression as 43646, with the added Jupyter notebook context and clearer reproduction scope.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 44910 is the most technically specific and actionable report in the set, with a concrete root-cause statement (3D position_ids misread as packed sequence) and a well-defined failure mode.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43366", + "right": "issue:43828", + "accept": false, + "reason": "Different bugs: GGUF/gpt-oss architecture support vs a Phi MoE dtype mismatch under autocast. No shared code path or concrete fix overlap." + }, + { + "left": "issue:24643", + "right": "issue:30064", + "accept": false, + "reason": "Unrelated failures: DeepSpeed training RuntimeError about a 2-D weight tensor vs image processor handling of void segmentation maps." 
+ }, + { + "left": "issue:43366", + "right": "issue:43531", + "accept": false, + "reason": "Both mention Qwen/MoE-style models, but one is GGUF architecture support and the other is a sliding_window issue. The underlying bug and code path differ." + }, + { + "left": "issue:44560", + "right": "issue:44918", + "accept": false, + "reason": "Both involve Qwen3.x, but one is a video StopIteration in Qwen3-vl-embedding and the other is TRL SFT unpacking embeddings. Different failure points and fixes." + }, + { + "left": "issue:43366", + "right": "issue:44589", + "accept": false, + "reason": "Completely different symptoms: gpt-oss GGUF support vs missing Float8 storage lookup. No evidence of the same root cause." + }, + { + "left": "issue:43950", + "right": "issue:45325", + "accept": false, + "reason": "Loading/regression around non-persistent buffers is a different issue from Qwen2.5-VL rope index scaling for still-image temporal position_ids." + }, + { + "left": "issue:43645", + "right": "issue:43646", + "accept": true, + "reason": "These are the same regression: Transformers 5.0 breaking custom model initialization. The notebook/Jupyter detail in 43645 is just an environment-specific manifestation of the same bug." + }, + { + "left": "issue:43550", + "right": "issue:44910", + "accept": false, + "reason": "Different concrete problems and model paths: torch.compile/SDPA on Bamba-9B-v2 vs Flash Attention illegal memory access on Qwen3.5 due to position_ids handling." + }, + { + "left": "issue:43824", + "right": "issue:43957", + "accept": false, + "reason": "Import error for a missing Qwen2.5-VL class is not the same as meta-device model loading failures. Different symptom and likely different fix." + }, + { + "left": "issue:42915", + "right": "issue:43957", + "accept": false, + "reason": "Qwen3MoE FineGrainedFP8Config failure and meta-device loading failures are distinct model/runtime regressions, not the same underlying bug." 
+ }, + { + "left": "issue:42617", + "right": "issue:43366", + "accept": false, + "reason": "3d_parallel.py execution failure is unrelated to GGUF gpt-oss architecture support." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 13, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:36010", + "issue:38175", + "issue:39692", + "issue:42915", + "issue:43317", + "issue:43596", + "issue:43638", + "issue:43644", + "issue:43646", + "issue:43950", + "issue:43994", + "issue:44534", + "issue:44560", + "issue:44661", + "issue:44756", + "issue:44805", + "issue:44877", + "issue:45030" + ], + "soft_pairs": [ + "issue:38175|issue:43994", + "issue:44560|issue:44805", + "issue:43646|issue:43950", + "issue:44877|issue:45030", + "issue:36010|issue:42915", + "issue:43638|issue:44661", + "issue:43596|issue:44661", + "issue:43317|issue:44756", + "issue:43644|issue:44534", + "issue:38175|issue:39692" + ], + "prepared_review_unit_hash": "3e0c474748f8008a37cc009c00ed32c8a23922fcbf579700cbfcf69fee5fa1f1", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13446, + "estimated_input_tokens": 3362, + "estimated_eval_tokens": 6980 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is actually several smaller duplicate groups mixed together: SigLIP2 output 
issues, non-persistent buffer corruption in Transformers 5.x, and config-validation regressions, plus multiple unrelated singleton bugs.", + "confidence": 0.87, + "canonical_issue_reason": "Issue 43950 is the clearest canonical for the non-persistent-buffer regression subgroup: it states the root symptom explicitly, frames it as a Transformers 5.x regression, and matches the broader buffer-corruption reports better than the shorter variants.", + "canonical_pr_reason": null, + "best_issue_reason": "As a representative issue in this mixed cluster, 43950 is the most complete and actionable bug report, and it anchors one of the strongest duplicate subgroups in the set.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:38175", + "right": "issue:43994", + "accept": true, + "reason": "Same SigLIP2 base model and same failure class: incorrect/degenerate outputs from the model/pipeline path." + }, + { + "left": "issue:44560", + "right": "issue:44805", + "accept": false, + "reason": "Different concrete bugs: Qwen3-vl video StopIteration vs a mask/tensor shape mismatch." + }, + { + "left": "issue:43646", + "right": "issue:43950", + "accept": true, + "reason": "Same underlying Transformers 5.x non-persistent-buffer corruption during model initialization/from_pretrained." + }, + { + "left": "issue:44877", + "right": "issue:45030", + "accept": true, + "reason": "Both report stricter config validation rejecting otherwise valid model configs." + }, + { + "left": "issue:36010", + "right": "issue:42915", + "accept": false, + "reason": "Unrelated failures: GenerationMixin import error vs Qwen3Moe FP8 config loading failure." + }, + { + "left": "issue:43638", + "right": "issue:44661", + "accept": false, + "reason": "Different code paths and symptoms: DeepSpeed ZeRO3/Bert init index error vs tokenizer-mapping constraint in add-new-model-like." 
+ }, + { + "left": "issue:43596", + "right": "issue:44661", + "accept": false, + "reason": "BertModel/ZeRO3 index error is unrelated to the tokenizer-mapping issue in add-new-model-like." + }, + { + "left": "issue:43317", + "right": "issue:44756", + "accept": false, + "reason": "Different underlying problems: dequantized-model offload loading failure vs mmap-related OOM on Strix Halo." + }, + { + "left": "issue:43644", + "right": "issue:44534", + "accept": true, + "reason": "Both describe the same non-persistent-buffer junk/corruption regression in Transformers 5.x." + }, + { + "left": "issue:38175", + "right": "issue:39692", + "accept": false, + "reason": "Both involve SigLIP2, but one is bad outputs while the other is a doc-example issue with model/processor mismatch and quantization failure." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 14, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:39692", + "issue:41720", + "issue:42915", + "issue:43708", + "issue:43856", + "issue:43873", + "issue:43994", + "issue:44075", + "issue:44451", + "issue:44488", + "issue:44960", + "issue:44977", + "issue:44991", + "issue:45313", + "issue:45357", + "issue:45362", + "issue:45406" + ], + "soft_pairs": [ + "issue:45313|issue:45357", + "issue:45362|issue:45406", + "issue:44960|issue:44977", + "issue:44451|issue:44488", + "issue:43708|issue:44075", + "issue:44488|issue:44991", + "issue:43856|issue:43873", + "issue:41720|issue:42915", + "issue:39692|issue:43994" + ], + "prepared_review_unit_hash": "da5f69cf3c5475a3e200fb59c842f0270f585e47b509610b8ab5f96ce680400c", + "component_budget": { + 
"node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12356, + "estimated_input_tokens": 3089, + "estimated_eval_tokens": 6434 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "Most candidates are distinct issues. The only plausible duplicate pair is the two model-loading failures for different BERT repos (44451/44488), which look like the same tokenizer/model-loading regression. The rest differ by model, code path, or symptom.", + "confidence": 0.67, + "canonical_issue_reason": "Issue 44488 is the better representative for the shared load-failure regression: it\u2019s the later, more referenced report and covers the same failure mode seen in 44451 with another model.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 44488 best captures the underlying bug because it has the clearest cross-reference signal and the broadest description of the load failure affecting multiple models.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:45313", + "right": "issue:45357", + "accept": false, + "reason": "Same model family, but different concrete bugs/code paths: ZeRO-3 weight loading vs save_pretrained visual-encoder key regression." + }, + { + "left": "issue:45362", + "right": "issue:45406", + "accept": false, + "reason": "Different failures and components: Qwen3.5 chat crash versus Gemma4Processor missing _tokenizer in serve." + }, + { + "left": "issue:44960", + "right": "issue:44977", + "accept": false, + "reason": "Different models and symptoms; this is not the same underlying bug." 
+ }, + { + "left": "issue:44451", + "right": "issue:44488", + "accept": true, + "reason": "Both report the same model-loading/tokenizer-loading regression on different BERT models, likely the same underlying bug." + }, + { + "left": "issue:43708", + "right": "issue:44075", + "accept": false, + "reason": "Trainer checkpoint resume max_steps math is unrelated to SGD optimizer arguments not being used." + }, + { + "left": "issue:44488", + "right": "issue:44991", + "accept": false, + "reason": "Both are loading-related, but one is a model-loading regression and the other is a tokenizer-loading failure for a different model; not the same concrete bug." + }, + { + "left": "issue:43856", + "right": "issue:43873", + "accept": false, + "reason": "Both mention memory/offloading, but one is MoE training memory usage and the other is quantization offloading behavior." + }, + { + "left": "issue:41720", + "right": "issue:42915", + "accept": false, + "reason": "Different Qwen3 variants and different failure modes: cudaErrorAssert on device mapping vs FineGrainedFP8Config failure." + }, + { + "left": "issue:39692", + "right": "issue:43994", + "accept": false, + "reason": "Both involve SigLIP2, but one is a docs/model-processor mismatch plus quantization failure, while the other is bad outputs with AutoModel/pipeline." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 15, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:42491", + "issue:42915", + "issue:43257", + "issue:43404", + "issue:43454", + "issue:43644", + "issue:43645", + "issue:43720", + "issue:43854", + "issue:43856", + "issue:43931", + "issue:44387", + "issue:44661", + "issue:44683", + "issue:44756", + "issue:45276", + "issue:45335" + ], + "soft_pairs": [ + "issue:43854|issue:43931", + "issue:44661|issue:44683", + "issue:42915|issue:44756", + "issue:42491|issue:45276", + "issue:42491|issue:43720", + "issue:43257|issue:43854", + "issue:43856|issue:44387", + "issue:43644|issue:43645", + "issue:42491|issue:43931", + "issue:45276|issue:45335", + "issue:43404|issue:43454", + "issue:42491|issue:43856" + ], + "prepared_review_unit_hash": "7f9e73ed2a2fcc1b545b26f28b522c03a85465e1337c434376d00be476ebea58", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 12, + "serialized_chars": 13416, + "estimated_input_tokens": 3354, + "estimated_eval_tokens": 6964 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "Most items are unrelated model-specific bugs. 
The only clear near-duplicate is the Gemma resize_token_embeddings reports (45276/45335), which appear to describe the same missing propagation of resized embeddings across closely related model variants.", + "confidence": 0.67, + "canonical_issue_reason": "issue:45276 is the strongest issue candidate because it has the clearest duplicate signal and reads as the broader Gemma embedding-resize bug, with issue:45335 as a close sibling report.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:45276 is the best global issue representative: it is open, central to the only accepted near-duplicate pair, and describes the underlying API regression succinctly.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43854", + "right": "issue:43931", + "accept": false, + "reason": "Both are model-loading failures, but for different models and failure modes; not the same underlying bug." + }, + { + "left": "issue:44661", + "right": "issue:44683", + "accept": false, + "reason": "Different subsystems: tokenizer mapping generation vs compiled flex_attention on torch >= 2.9." + }, + { + "left": "issue:42915", + "right": "issue:44756", + "accept": false, + "reason": "One is Qwen3MoE FP8 behavior, the other is Strix Halo mmap memory handling; no shared bug path." + }, + { + "left": "issue:42491", + "right": "issue:45276", + "accept": false, + "reason": "Different APIs and model families: Qwen3 MoE LoRA compatibility vs Gemma token-embedding resizing." + }, + { + "left": "issue:42491", + "right": "issue:43720", + "accept": false, + "reason": "Different model families and loading issues; one is Qwen3 MoE LoRA compatibility, the other BitNet packed-weight loading." + }, + { + "left": "issue:43257", + "right": "issue:43854", + "accept": false, + "reason": "Both involve loading, but they concern unrelated models and mechanisms (accelerate/deepspeed conversion vs GLM unit-test loading)." 
+ }, + { + "left": "issue:43856", + "right": "issue:44387", + "accept": false, + "reason": "Both mention memory/OOM, but the contexts differ too much to be the same bug (Qwen3 MoE training vs int4 quantization memory reservation)." + }, + { + "left": "issue:43644", + "right": "issue:43645", + "accept": false, + "reason": "Same release window, but different problems: junk-filled non-persistent buffers vs Jupyter custom-model initialization." + }, + { + "left": "issue:42491", + "right": "issue:43931", + "accept": false, + "reason": "Different model-specific loading bugs; no indication of the same underlying failure." + }, + { + "left": "issue:45276", + "right": "issue:45335", + "accept": true, + "reason": "Same core bug: resize_token_embeddings does not propagate to model embeddings in closely related Gemma/T5Gemma variants, so one fix could plausibly cover both." + }, + { + "left": "issue:43404", + "right": "issue:43454", + "accept": false, + "reason": "Both are untied lm_head reports, but they affect different model classes (Mistral3 vs AyaVision) and likely need separate fixes." + }, + { + "left": "issue:42491", + "right": "issue:43856", + "accept": false, + "reason": "Different Qwen3 MoE issues: LoRA compatibility vs training memory usage." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 16, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:36010", + "issue:39404", + "issue:39692", + "issue:41720", + "issue:42947", + "issue:43381", + "issue:43425", + "issue:43873", + "issue:43957", + "issue:44155", + "issue:44164", + "issue:44291", + "issue:44387", + "issue:44402", + "issue:44488", + "issue:44756", + "issue:44960" + ], + "soft_pairs": [ + "issue:36010|issue:39404", + "issue:44164|issue:44291", + "issue:44402|issue:44488", + "issue:41720|issue:44155", + "issue:43873|issue:44387", + "issue:39692|issue:44960", + "issue:43425|issue:43957", + "issue:44387|issue:44756", + "issue:42947|issue:43381" + ], + "prepared_review_unit_hash": "4f1830d0e29ffbc5051f59bd65578b04c01326b6c38b3412233b18cef3f6eb43", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12437, + "estimated_input_tokens": 3110, + "estimated_eval_tokens": 6476 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These soft pairs are mostly broad similarity matches, not true duplicates. 
They span different models, code paths, and failure modes, so none should be merged into a single canonical bug report.", + "confidence": 0.97, + "canonical_issue_reason": "No single canonical issue fits the cluster: the candidate pairs are heterogeneous and do not share one concrete underlying bug.", + "canonical_pr_reason": null, + "best_issue_reason": "No global best issue is suitable as a canonical representative because the matched issues cover unrelated defects rather than one root cause.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:36010", + "right": "issue:39404", + "accept": false, + "reason": "Import-path regression for GenerationMixin vs Whisper pipeline return_language behavior; different APIs and root causes." + }, + { + "left": "issue:44164", + "right": "issue:44291", + "accept": false, + "reason": "save/from_pretrained extra_state handling and init_empty_weights unexpected kwarg are distinct loading/saving internals bugs." + }, + { + "left": "issue:44402", + "right": "issue:44488", + "accept": false, + "reason": "Tokenizer vocab-size mismatch for one model is not the same as a separate model load failure for cjvt/sleng-bert." + }, + { + "left": "issue:41720", + "right": "issue:44155", + "accept": false, + "reason": "Qwen3 auto device mapping cuda assert and AudioFlamingo3 batched embedding/token leakage are unrelated code paths." + }, + { + "left": "issue:43873", + "right": "issue:44387", + "accept": false, + "reason": "Both concern quantization/memory, but one is offloading behavior and the other is increased reserved CUDA memory under int4; too different to merge." + }, + { + "left": "issue:39692", + "right": "issue:44960", + "accept": false, + "reason": "SigLIP2 documentation example problems are unrelated to the GLM5 issue." 
+ }, + { + "left": "issue:43425", + "right": "issue:43957", + "accept": false, + "reason": "Torch 2.10 compatibility and torch.device('meta') loading regressions are separate compatibility/loading issues." + }, + { + "left": "issue:44387", + "right": "issue:44756", + "accept": false, + "reason": "Int4 quantization reserved-memory OOM and Strix Halo mmap OOM are different failure causes and fixes." + }, + { + "left": "issue:42947", + "right": "issue:43381", + "accept": false, + "reason": "Gradient checkpointing with PEFT LoRA not taking effect is distinct from checkpointing being unusable in eval mode." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 17, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:43381", + "issue:43646", + "issue:43708", + "issue:43854", + "issue:43957", + "issue:44164", + "issue:44291", + "issue:44292", + "issue:44479", + "issue:44743", + "issue:44756", + "issue:44877", + "issue:44912", + "issue:44928", + "issue:44960", + "issue:45216", + "issue:45276", + "issue:45292" + ], + "soft_pairs": [ + "issue:44292|issue:44912", + "issue:43854|issue:45216", + "issue:44291|issue:44756", + "issue:45276|issue:45292", + "issue:44743|issue:44960", + "issue:43957|issue:44756", + "issue:43381|issue:44928", + "issue:44164|issue:44756", + "issue:43646|issue:43708", + "issue:44479|issue:44877" + ], + "prepared_review_unit_hash": "9e9c222e22466c8df1ade37324ed7a6d190379751d3586abeaaf8004b0d1aaeb", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + 
"estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13429, + "estimated_input_tokens": 3358, + "estimated_eval_tokens": 6972 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly a grab bag of unrelated Transformers bug reports spanning model loading, config parsing, training resume logic, quantization, and a single clear duplicate pair around `resize_token_embeddings` / `output_embeddings`.", + "confidence": 0.87, + "canonical_issue_reason": "Issue 45292 is the clearest general statement of the `resize_token_embeddings` / `output_embeddings` regression; 45276 reads like a Gemma4-specific variant of the same underlying bug.", + "canonical_pr_reason": null, + "best_issue_reason": "45292 is the best representative because it describes the core API bug in the broadest, least model-specific terms and is the strongest duplicate anchor in this set.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44292", + "right": "issue:44912", + "accept": false, + "reason": "Both involve loading failures, but one is about `init_empty_weights` / `_is_hf_initialized` and the other is about strict config handling for `granite_speech`; different bugs." + }, + { + "left": "issue:43854", + "right": "issue:45216", + "accept": false, + "reason": "Both mention model loading/checkpoint behavior, but the concrete failures differ: GLM-4.7-Flash test loading vs Qwen3.5 save_pretrained checkpoint corruption." + }, + { + "left": "issue:44291", + "right": "issue:44756", + "accept": false, + "reason": "`init_empty_weights` constructor argument error and Strix Halo mmap OOM are unrelated code paths and fixes." 
+ }, + { + "left": "issue:45276", + "right": "issue:45292", + "accept": true, + "reason": "These describe the same underlying `resize_token_embeddings` bug: output embeddings are not updated; 45276 is just Gemma4-specific wording." + }, + { + "left": "issue:44743", + "right": "issue:44960", + "accept": false, + "reason": "Qwen recurrent-state reset with cache is unrelated to the GLM5 issue; same broad family but not the same bug." + }, + { + "left": "issue:43957", + "right": "issue:44756", + "accept": false, + "reason": "One is about `torch.device(\"meta\")` loading regressions, the other about mmap on Strix Halo; no shared concrete failure." + }, + { + "left": "issue:43381", + "right": "issue:44928", + "accept": false, + "reason": "Gradient checkpointing eval-mode behavior and RLHF NaN/SDPA fallback are different failures with different root causes." + }, + { + "left": "issue:44164", + "right": "issue:44756", + "accept": false, + "reason": "`save/from_pretrained` extra_state handling and mmap OOM are unrelated save/load vs memory-mapping problems." + }, + { + "left": "issue:43646", + "right": "issue:43708", + "accept": false, + "reason": "Custom model initialization breakage and `resume_from_checkpoint` max_steps miscalculation are different trainer/model-init bugs." + }, + { + "left": "issue:44479", + "right": "issue:44877", + "accept": false, + "reason": "Video input regression in Qwen-VL models and strict granite_speech config loading are separate regressions despite both being version-related." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 18, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:43257", + "issue:43425", + "issue:43645", + "issue:43716", + "issue:43828", + "issue:43906", + "issue:43957", + "issue:44164", + "issue:44265", + "issue:44402", + "issue:44451", + "issue:44530", + "issue:44589", + "issue:44756", + "issue:44863", + "issue:44898", + "issue:45292", + "issue:45335" + ], + "soft_pairs": [ + "issue:45292|issue:45335", + "issue:44402|issue:44451", + "issue:43645|issue:44164", + "issue:43425|issue:43828", + "issue:44265|issue:44898", + "issue:43257|issue:44863", + "issue:43828|issue:43957", + "issue:44589|issue:44756", + "issue:43906|issue:44530", + "issue:43425|issue:43716" + ], + "prepared_review_unit_hash": "4ad147edb767cb6601af55d9eb4bb87c6c23995526de42c05ac2f10614aa118a", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13348, + "estimated_input_tokens": 3337, + "estimated_eval_tokens": 6930 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "Only one soft pair looks like a true duplicate: the two resize_token_embeddings reports. 
The other pairs are similar at a high level but appear to describe different bugs, models, or code paths.", + "confidence": 0.82, + "canonical_issue_reason": "issue:45292 is the more canonical report because it states the underlying regression in broader terms (resize_token_embeddings not updating output embeddings), while issue:45335 looks like a model-specific instance of the same problem for t5gemma's decoder.embed_tokens.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:45292 is the best representative issue in this cluster: it is the broadest and clearest statement of the duplicate bug, and it can absorb the more specific t5gemma variant.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:45292", + "right": "issue:45335", + "accept": true, + "reason": "Same underlying bug: resize_token_embeddings updates one embedding path but not the tied/output embedding path; the second issue is just a model-specific framing of the same failure." + }, + { + "left": "issue:44402", + "right": "issue:44451", + "accept": false, + "reason": "Both mention tokenizer/model loading problems, but they refer to different models and the reports do not clearly show the same concrete failure or code path." + }, + { + "left": "issue:43645", + "right": "issue:44164", + "accept": false, + "reason": "One is about custom models in Jupyter notebooks, the other about save/from_pretrained and extra_state serialization; too different to treat as the same bug." + }, + { + "left": "issue:43425", + "right": "issue:43828", + "accept": false, + "reason": "A generic Torch 2.10 incompatibility report is not the same as a Phi-tiny-MoE autocast dtype-mismatch bug." + }, + { + "left": "issue:44265", + "right": "issue:44898", + "accept": false, + "reason": "torch.export with torch_compilable_check and Perceiver image classification resizing are unrelated failure modes." 
+ }, + { + "left": "issue:43257", + "right": "issue:44863", + "accept": false, + "reason": "Both are model-loading failures, but they concern different architectures and different checkpoint/load paths; not enough evidence of the same bug." + }, + { + "left": "issue:43828", + "right": "issue:43957", + "accept": false, + "reason": "Autocast dtype mismatch and meta-device loading failures are distinct issues affecting different mechanisms." + }, + { + "left": "issue:44589", + "right": "issue:44756", + "accept": false, + "reason": "Float8 storage deserialization and mmap OOM on Strix Halo are unrelated problems." + }, + { + "left": "issue:43906", + "right": "issue:44530", + "accept": false, + "reason": "An isolated reproduction of one issue is not the same as a Qwen3.5 PagedAttentionCache crash on linear_attention." + }, + { + "left": "issue:43425", + "right": "issue:43716", + "accept": false, + "reason": "Torch version incompatibility and an image preprocessor/model dtype mismatch are different bugs." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 19, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:39404", + "issue:43299", + "issue:43425", + "issue:43596", + "issue:43611", + "issue:43645", + "issue:43901", + "issue:43931", + "issue:44479", + "issue:44509", + "issue:44530", + "issue:44589", + "issue:44683", + "issue:44805", + "issue:44863", + "issue:44912", + "issue:44918", + "issue:45084" + ], + "soft_pairs": [ + "issue:39404|issue:44912", + "issue:43931|issue:44863", + "issue:43611|issue:43645", + "issue:44530|issue:45084", + "issue:43901|issue:44509", + "issue:43299|issue:43931", + "issue:44479|issue:44805", + "issue:44589|issue:45084", + "issue:43596|issue:44683", + "issue:43425|issue:44918" + ], + "prepared_review_unit_hash": "beb9be6c897cb739e0b2e2165ec68c832ca939d52d73d6e0c38249c1c5350729", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13400, + "estimated_input_tokens": 3350, + "estimated_eval_tokens": 6956 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly a set of unrelated Transformers issues that only loosely share themes like model loading regressions, docs cleanup, or runtime incompatibilities. 
None of the soft-paired items look like the same underlying bug or change, so the cluster should not be merged.", + "confidence": 0.92, + "canonical_issue_reason": "If a single issue must represent the cluster, issue #43611 is the broadest and most central loading-regression report, but the overall set is too heterogeneous to have a strong true canonical issue.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue #43611 is the most generally representative of the repeated 'v5 breaks model loading' theme and has a clearer, broader user impact than the other issues.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:39404", + "right": "issue:44912", + "accept": false, + "reason": "Whisper pipeline return_language regression and MXFP4 quantization fallback are unrelated subsystems and failure modes." + }, + { + "left": "issue:43931", + "right": "issue:44863", + "accept": false, + "reason": "Both are model-loading problems, but one is a Qwen3-VL weight-shape mismatch and the other is NemotronH checkpoint loading; no concrete shared code-path is evident." + }, + { + "left": "issue:43611", + "right": "issue:43645", + "accept": false, + "reason": "Both mention Transformers 5.0.0 loading regressions, but one is base_model_prefix loading and the other is custom notebook model initialization; these are distinct bugs." + }, + { + "left": "issue:44530", + "right": "issue:45084", + "accept": false, + "reason": "PagedAttentionCache linear_attention crash and compile-time non-template-node error are different failure classes with no sign of the same fix." + }, + { + "left": "issue:43901", + "right": "issue:44509", + "accept": false, + "reason": "Both are documentation-related, but they target different pipeline/task docs and different API removals." 
+ }, + { + "left": "issue:43299", + "right": "issue:43931", + "accept": false, + "reason": "Both concern Qwen3-VL loading, but one is a MoE model-loading breakage and the other is a weight-shape mismatch for a different checkpoint; not the same bug." + }, + { + "left": "issue:44479", + "right": "issue:44805", + "accept": false, + "reason": "A video-input regression for Qwen VL models and a mask/index shape mismatch are not the same concrete code-path issue." + }, + { + "left": "issue:44589", + "right": "issue:45084", + "accept": false, + "reason": "Missing Float8 storage support and a compile failure on non-template nodes are unrelated errors." + }, + { + "left": "issue:43596", + "right": "issue:44683", + "accept": false, + "reason": "Deepspeed ZeRO3/BertModel index error and compiled flex_attention failure on torch>=2.9 are distinct runtime problems." + }, + { + "left": "issue:43425", + "right": "issue:44918", + "accept": false, + "reason": "Torch 2.10 incompatibility and Qwen3.5 embedding unpacking in TRL SFT trainer are unrelated issues." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 20, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:32090", + "issue:36032", + "issue:39404", + "issue:40990", + "issue:42947", + "issue:43452", + "issue:43582", + "issue:43632", + "issue:43856", + "issue:43957", + "issue:43994", + "issue:44079", + "issue:44164", + "issue:44488", + "issue:44671", + "issue:44756", + "issue:44991", + "issue:45081" + ], + "soft_pairs": [ + "issue:43856|issue:44756", + "issue:43994|issue:44079", + "issue:40990|issue:42947", + "issue:44671|issue:44991", + "issue:36032|issue:43452", + "issue:39404|issue:44671", + "issue:44991|issue:45081", + "issue:43582|issue:43632", + "issue:44488|issue:45081", + "issue:43957|issue:44164", + "issue:32090|issue:39404" + ], + "prepared_review_unit_hash": "0642bea3e7f6ccab1856a75965842ff8ef57818f97426129758e699af573bc8f", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 11, + "serialized_chars": 13767, + "estimated_input_tokens": 3442, + "estimated_eval_tokens": 7140 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is heterogeneous: the soft pairs are broadly theme-similar (loading, tokenizer, training, memory, v5 regressions) but do not look like the same underlying bug/change. 
I would not merge any of the candidate pairs.", + "confidence": 0.81, + "canonical_issue_reason": "issue:44991 is the closest umbrella representative because it is a concrete Transformers v5 tokenizer-loading regression, which is the most recurrent theme across the cluster. That said, the items are not true duplicates overall.", + "canonical_pr_reason": null, + "best_issue_reason": "44991 is the most suitable single issue to anchor the cluster because it is specific, reproducible, and sits in the broad tokenizer/model-loading regression area shared by several candidates.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43856", + "right": "issue:44756", + "accept": false, + "reason": "Both mention memory/OOM, but one is Qwen3 MoE training memory use and the other is Strix Halo mmap behavior; different root causes and fixes." + }, + { + "left": "issue:43994", + "right": "issue:44079", + "accept": false, + "reason": "43994 is a model/pipeline output correctness bug for SigLIP2; 44079 is a generic ModelOutput key-assignment issue when a previous value was None. Not the same code path." + }, + { + "left": "issue:40990", + "right": "issue:42947", + "accept": false, + "reason": "High perplexity in evaluation and ineffective gradient checkpointing are distinct training-quality and training-memory issues, not the same underlying defect." + }, + { + "left": "issue:44671", + "right": "issue:44991", + "accept": false, + "reason": "One is incorrect CamemBERT MLM predictions in v5, the other is tokenizer loading failure for est-roberta. Similar version context, but different failure modes." + }, + { + "left": "issue:36032", + "right": "issue:43452", + "accept": false, + "reason": "T5 tokenizer special-token conflict and gguf_file loading failures affect different loading paths and have different error signatures." 
+ }, + { + "left": "issue:39404", + "right": "issue:44671", + "accept": false, + "reason": "Whisper pipeline return_language regression and CamemBERT masked-LM prediction regression are unrelated model-specific behavior changes." + }, + { + "left": "issue:44991", + "right": "issue:45081", + "accept": false, + "reason": "Both are tokenizer-loading regressions, but they hit different tokenizer implementations and different crashes; not enough evidence of one shared bug." + }, + { + "left": "issue:43582", + "right": "issue:43632", + "accept": false, + "reason": "Apple Silicon caching_allocator_warmup TypeError and the _is_hf_initialized flag regression are separate issues in different subsystems." + }, + { + "left": "issue:44488", + "right": "issue:45081", + "accept": false, + "reason": "Both are model/tokenizer loading problems, but sleng-bert loading and Mistral regex/backend_tokenizer crashing are different model-specific failures." + }, + { + "left": "issue:43957", + "right": "issue:44164", + "accept": false, + "reason": "Meta-device loading regressions and save/from_pretrained extra_state handling are distinct serialization/loading bugs." + }, + { + "left": "issue:32090", + "right": "issue:39404", + "accept": false, + "reason": "Trainer _gpu_broadcast_one NoneType error and Whisper pipeline return_language failure are unrelated." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 21, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:40990", + "issue:43421", + "issue:43425", + "issue:43611", + "issue:43644", + "issue:43716", + "issue:43883", + "issue:44164", + "issue:44206", + "issue:44220", + "issue:44265", + "issue:44479", + "issue:44488", + "issue:44610", + "issue:44987", + "issue:45084", + "issue:45161" + ], + "soft_pairs": [ + "issue:40990|issue:43421", + "issue:43425|issue:45161", + "issue:43883|issue:44488", + "issue:44164|issue:44479", + "issue:44265|issue:44610", + "issue:44488|issue:44987", + "issue:43716|issue:45084", + "issue:44206|issue:44220", + "issue:43611|issue:43644" + ], + "prepared_review_unit_hash": "70be9712436004a530c298d5236bf5092e41a7db3d41356f4a4cfa0135159eb8", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12398, + "estimated_input_tokens": 3100, + "estimated_eval_tokens": 6456 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly heterogeneous: the soft-similarity links generally connect unrelated bugs. 
Only the audio feature-extraction pair (44206/44220) looks plausibly like the same underlying regression.", + "confidence": 0.34, + "canonical_issue_reason": "44206 is the best anchor because it is the more specific report and appears to describe the underlying crash path that 44220 is also pointing at.", + "canonical_pr_reason": null, + "best_issue_reason": "44206 is the most actionable issue in the set and the only one that plausibly serves as a duplicate target.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:40990", + "right": "issue:43421", + "accept": false, + "reason": "Different subsystems and failures: perplexity on GPT-OSS vs runtime special-token/post-processor updates." + }, + { + "left": "issue:43425", + "right": "issue:45161", + "accept": false, + "reason": "Both mention model/runtime compatibility, but one is a Torch-version incompatibility and the other is a GPT-OSS MoE tensor-parallelism issue." + }, + { + "left": "issue:43883", + "right": "issue:44488", + "accept": false, + "reason": "Distinct problems: missing attribute on MolmoForCausalLM vs failure to load a specific model checkpoint." + }, + { + "left": "issue:44164", + "right": "issue:44479", + "accept": false, + "reason": "Save/from_pretrained extra_state handling and video-input regression are unrelated code paths." + }, + { + "left": "issue:44265", + "right": "issue:44610", + "accept": false, + "reason": "torch.export/torch_compilable_check failure is unrelated to OmDet-Turbo processor output-size mismatch." + }, + { + "left": "issue:44488", + "right": "issue:44987", + "accept": false, + "reason": "Both are load failures, but for different models and likely different root causes; too broad to merge as one bug." + }, + { + "left": "issue:43716", + "right": "issue:45084", + "accept": false, + "reason": "Image-preprocessor dtype mismatch and template-node compilation failure are unrelated." 
+ }, + { + "left": "issue:44206", + "right": "issue:44220", + "accept": true, + "reason": "Both point to the same audio feature-extraction path and likely the same fbank/center-arg crash regression." + }, + { + "left": "issue:43611", + "right": "issue:43644", + "accept": false, + "reason": "Two separate Transformers 5.0 regressions with different symptoms and code paths: base_model_prefix loading vs non-persistent buffers." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 22, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:34567", + "issue:40444", + "issue:42947", + "issue:43317", + "issue:43452", + "issue:43482", + "issue:43541", + "issue:43643", + "issue:43644", + "issue:43646", + "issue:43883", + "issue:44451", + "issue:44530", + "issue:44756", + "issue:44987", + "issue:44991", + "issue:45084" + ], + "soft_pairs": [ + "issue:42947|issue:44756", + "issue:34567|issue:40444", + "issue:43883|issue:44991", + "issue:43452|issue:43482", + "issue:43541|issue:45084", + "issue:44451|issue:44987", + "issue:43643|issue:44530", + "issue:43644|issue:43646", + "issue:43317|issue:43482" + ], + "prepared_review_unit_hash": "6401fa5ec410bd697a9a87777a14feee3afa1119a11fc40a06565059d9332704", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12453, + "estimated_input_tokens": 3114, + "estimated_eval_tokens": 6484 + }, + "overflow_policy": 
"truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "Most items are unrelated bugs spanning training metrics, model loading, cache behavior, and v5 regressions. The only plausible duplicate pair is the GGUF loading regression affecting auto-loading paths.", + "confidence": 0.78, + "canonical_issue_reason": "Issue 43452 is the clearest concrete bug report and best umbrella for the GGUF auto-loading regression, covering both tokenizer and causal LM load paths.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 43452 is the strongest canonical candidate: specific, actionable, and broad enough to represent the shared GGUF loading failure without being overly generic.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:42947", + "right": "issue:44756", + "accept": false, + "reason": "Different problems: LoRA/gradient checkpointing behavior vs mmap/OOM on Strix Halo." + }, + { + "left": "issue:34567", + "right": "issue:40444", + "accept": false, + "reason": "Unrelated training-state metric bug vs multimodal IterableDataset failure." + }, + { + "left": "issue:43883", + "right": "issue:44991", + "accept": false, + "reason": "Different model-loading failures with different symptoms and likely different code paths." + }, + { + "left": "issue:43452", + "right": "issue:43482", + "accept": true, + "reason": "Both describe the same GGUF loading regression in the v5 auto-loading path; 43482 looks like a specific model instance of the broader 43452 bug." + }, + { + "left": "issue:43541", + "right": "issue:45084", + "accept": false, + "reason": "Torch dynamo/grouped_mm tracing error is unrelated to a template compilation TypeError." + }, + { + "left": "issue:44451", + "right": "issue:44987", + "accept": false, + "reason": "Both are loading issues, but the models and failure modes are different enough to suggest separate bugs." 
+ }, + { + "left": "issue:43643", + "right": "issue:44530", + "accept": false, + "reason": "Remote-code config field loss is unrelated to PagedAttentionCache linear_attention crashes." + }, + { + "left": "issue:43644", + "right": "issue:43646", + "accept": false, + "reason": "Both mention Transformers 5.0.0 regressions, but one is about non-persistent buffers and the other about custom model initialization; not the same concrete bug." + }, + { + "left": "issue:43317", + "right": "issue:43482", + "accept": false, + "reason": "Dequantized model offload failure is a different loading/offload path than GGUF model loading." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 23, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:36032", + "issue:39404", + "issue:42915", + "issue:43262", + "issue:43377", + "issue:43404", + "issue:43645", + "issue:43646", + "issue:43825", + "issue:44403", + "issue:44466", + "issue:44530", + "issue:44534", + "issue:44849", + "issue:44987", + "issue:44991", + "issue:45081", + "issue:45325" + ], + "soft_pairs": [ + "issue:36032|issue:42915", + "issue:43262|issue:43377", + "issue:44849|issue:45325", + "issue:39404|issue:43825", + "issue:44987|issue:44991", + "issue:42915|issue:44530", + "issue:36032|issue:43645", + "issue:43404|issue:44466", + "issue:43646|issue:44534", + "issue:44403|issue:45081" + ], + "prepared_review_unit_hash": "08fc55ec248ca86e1b97d9c30e1ce47ad499fcc721a3249c6c387ef2ec9920d4", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + 
"estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13647, + "estimated_input_tokens": 3412, + "estimated_eval_tokens": 7080 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The cluster is mostly a set of unrelated Transformer v5 regression reports across different models and subsystems. None of the proposed soft edges look like true duplicates of the same underlying bug/change.", + "confidence": 0.94, + "canonical_issue_reason": "issue:43645 is the broadest, most representative user-facing v5 regression here (custom model initialization breaking), but the set does not form a coherent duplicate cluster overall.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:43645 is the best anchor issue because it describes a general, high-impact regression rather than a model-specific edge case.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:36032", + "right": "issue:42915", + "accept": false, + "reason": "Tokenizer-load conflict in T5 is unrelated to Qwen3Moe FP8 configuration failure." + }, + { + "left": "issue:43262", + "right": "issue:43377", + "accept": false, + "reason": "Both are audio-related, but one is a chat template sampling-rate bug and the other is a batching/padding-mask encoder bug." + }, + { + "left": "issue:44849", + "right": "issue:45325", + "accept": false, + "reason": "Different Qwen model families and different failure modes: hidden-states output vs rope/position-id scaling." + }, + { + "left": "issue:39404", + "right": "issue:43825", + "accept": false, + "reason": "Both involve pipeline/Whisper/translation, but one is a functional return_language regression and the other is an error-message / task-support messaging issue." 
+ }, + { + "left": "issue:44987", + "right": "issue:44991", + "accept": false, + "reason": "Both are loading failures under v5, but they affect different models/tokenizers and do not appear to share the same concrete code path." + }, + { + "left": "issue:42915", + "right": "issue:44530", + "accept": false, + "reason": "Qwen3Moe FP8 failure and Qwen3.5 PagedAttentionCache crash are distinct bugs with different triggers and fixes." + }, + { + "left": "issue:36032", + "right": "issue:43645", + "accept": false, + "reason": "T5 tokenizer special-token conflict is not the same as custom model initialization breaking in Jupyter." + }, + { + "left": "issue:43404", + "right": "issue:44466", + "accept": false, + "reason": "Untied lm_head in Mistral3 is a model-specific tying bug; inconsistent serialization by device is a separate save/load behavior issue." + }, + { + "left": "issue:43646", + "right": "issue:44534", + "accept": false, + "reason": "Custom model initialization breakage and non-persistent buffer junk initialization are different regressions." + }, + { + "left": "issue:44403", + "right": "issue:45081", + "accept": false, + "reason": "Generic load noise is not the same as the Mistral tokenizer backend_tokenizer crash with fix_mistral_regex=True." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 24, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:41553", + "issue:42915", + "issue:43322", + "issue:43377", + "issue:43540", + "issue:43582", + "issue:43645", + "issue:43819", + "issue:43994", + "issue:44220", + "issue:44451", + "issue:44534", + "issue:44610", + "issue:44683", + "issue:45081", + "issue:45084", + "issue:45216", + "issue:45357" + ], + "soft_pairs": [ + "issue:44451|issue:45081", + "issue:43994|issue:44610", + "issue:43322|issue:43540", + "issue:43645|issue:44534", + "issue:45216|issue:45357", + "issue:44220|issue:44683", + "issue:43377|issue:43819", + "issue:41553|issue:43582", + "issue:42915|issue:45084" + ], + "prepared_review_unit_hash": "fefd9be2297c33e8046aed6501851bb0179680f2c18ed084e44541d7c67753b1", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 9, + "serialized_chars": 13041, + "estimated_input_tokens": 3261, + "estimated_eval_tokens": 6778 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This set is mostly not a single duplicate cluster: the items span unrelated bugs across tokenizers, multimodal models, audio, compilation, and platform-specific failures. 
The only strong duplicate-looking pair is the Qwen3.5 save_pretrained regression (45216/45357), where 45357 appears to be a more specific follow-up to the same checkpoint-key bug.", + "confidence": 0.88, + "canonical_issue_reason": "No single canonical issue for the whole set; the only true duplicate pair is 45216/45357, and 45216 is the better anchor because it is broader and earlier.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 45216 is the best representative of the only genuine duplicate thread in this batch: it states the broader Qwen3.5 save_pretrained regression that 45357 narrows down to incorrect visual encoder keys.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44451", + "right": "issue:45081", + "accept": false, + "reason": "Both are tokenizer-loading failures, but they target different models and different root causes: ScandiBERT load failure vs Mistral regex patch crash." + }, + { + "left": "issue:43994", + "right": "issue:44610", + "accept": false, + "reason": "Both involve model/processor output mismatches, but the affected models and failure modes are unrelated (SigLIP2 AutoModel/pipeline vs OmDet-Turbo image size mismatch)." + }, + { + "left": "issue:43322", + "right": "issue:43540", + "accept": false, + "reason": "Different multimodal bugs with different code paths: Llava Next loading segfault vs Qwen3OmniMoe video-input ValueError." + }, + { + "left": "issue:43645", + "right": "issue:44534", + "accept": false, + "reason": "Both are Transformers v5 regressions, but one is Jupyter custom-model initialization and the other is non-persistent buffer initialization; not the same bug." + }, + { + "left": "issue:45216", + "right": "issue:45357", + "accept": true, + "reason": "Same underlying Qwen3.5 save_pretrained regression: both report incorrect saved checkpoint contents, with 45357 adding the specific visual-encoder-key symptom." 
+ }, + { + "left": "issue:44220", + "right": "issue:44683", + "accept": false, + "reason": "Unrelated issues: one is about _torch_extract_fbank_features, the other about compiled flex_attention on torch >= 2.9." + }, + { + "left": "issue:43377", + "right": "issue:43819", + "accept": false, + "reason": "Both concern audio/model correctness, but they are different components and bugs: MIMI padding-mask batching vs DAC from_latents/STE mismatch." + }, + { + "left": "issue:41553", + "right": "issue:43582", + "accept": false, + "reason": "Completely different problems: Voxtral AutoTokenizer error-message quality vs Apple Silicon caching_allocator_warmup TypeError." + }, + { + "left": "issue:42915", + "right": "issue:45084", + "accept": false, + "reason": "No shared bug or code path: Qwen3Moe FineGrainedFP8Config failure vs a template-compilation TypeError." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 25, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:40990", + "issue:42915", + "issue:43296", + "issue:43404", + "issue:43531", + "issue:43632", + "issue:44062", + "issue:44265", + "issue:44291", + "issue:44479", + "issue:44756", + "issue:44811", + "issue:44987", + "issue:45072", + "issue:45081", + "issue:45127", + "issue:45325" + ], + "soft_pairs": [ + "issue:43632|issue:44756", + "issue:44987|issue:45081", + "issue:43296|issue:43531", + "issue:43404|issue:45127", + "issue:44062|issue:44987", + "issue:44479|issue:45325", + "issue:40990|issue:44811", + "issue:44265|issue:45072", + "issue:42915|issue:44291" + ], + "prepared_review_unit_hash": 
"730389e49cbda655f46f79089c1440ca8ade92a3ecdc38c1dfd7b3fcdfb614c3", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12748, + "estimated_input_tokens": 3187, + "estimated_eval_tokens": 6630 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is heterogeneous: the issues touch different models, regressions, and code paths, so there is no true duplicate core to consolidate.", + "confidence": 0.97, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43632", + "right": "issue:44756", + "accept": false, + "reason": "Different bugs: one is an _is_hf_initialized/v5 compatibility regression, the other is an mmap/OOM platform issue." + }, + { + "left": "issue:44987", + "right": "issue:45081", + "accept": false, + "reason": "Both involve model loading, but they fail in different tokenizer paths and for different models; not the same underlying bug." + }, + { + "left": "issue:43296", + "right": "issue:43531", + "accept": false, + "reason": "Different targets and failures: PaddleOCR-VL load failure vs Qwen3-MoE sliding_window behavior." + }, + { + "left": "issue:43404", + "right": "issue:45127", + "accept": false, + "reason": "One is lm_head weight tying in Mistral3ForConditionalGeneration; the other is LoRA merge collapse with extended vocab. Distinct code paths." + }, + { + "left": "issue:44062", + "right": "issue:44987", + "accept": false, + "reason": "AddedToken constructor error is unrelated to the physical-intelligence/fast load failure." 
+ }, + { + "left": "issue:44479", + "right": "issue:45325", + "accept": false, + "reason": "Both are Qwen VL regressions, but one is video-input handling and the other is still-image rope/index scaling; not the same defect." + }, + { + "left": "issue:40990", + "right": "issue:44811", + "accept": false, + "reason": "Perplexity on GPT-OSS and Whisper batch_decode skip_special_tokens are unrelated bugs." + }, + { + "left": "issue:44265", + "right": "issue:45072", + "accept": false, + "reason": "torch.export with torch_compilable_check and bfloat16 dtype mismatches are different failure modes." + }, + { + "left": "issue:42915", + "right": "issue:44291", + "accept": false, + "reason": "FP8 config failure for Qwen3Moe is separate from the _is_hf_initialized/init_empty_weights argument error." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 26, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:36032", + "issue:40444", + "issue:42915", + "issue:43317", + "issue:43421", + "issue:43531", + "issue:43611", + "issue:43632", + "issue:43742", + "issue:43818", + "issue:43856", + "issue:44534", + "issue:44568", + "issue:44589", + "issue:44877", + "issue:45325", + "issue:45357" + ], + "soft_pairs": [ + "issue:43317|issue:43856", + "issue:43611|issue:44534", + "issue:42915|issue:44589", + "issue:43421|issue:44568", + "issue:36032|issue:43742", + "issue:43632|issue:44534", + "issue:40444|issue:45325", + "issue:43818|issue:45357", + "issue:43531|issue:44877" + ], + "prepared_review_unit_hash": "064827165742e80c8f19a5ea967b9d94c82212ac036d9f44af416d52f95c46e4", + "component_budget": { + "node_count": 
349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12559, + "estimated_input_tokens": 3140, + "estimated_eval_tokens": 6536 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The set is a grab bag of unrelated loading, tokenizer, multimodal, quantization, and config issues. None of the soft pairs looks like the same underlying bug/change, so I would not consolidate any of them.", + "confidence": 0.93, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43317", + "right": "issue:43856", + "accept": false, + "reason": "Both involve Qwen-related model behavior, but one is a device_map/offload loading failure and the other is MoE training memory usage; different code paths and fixes." + }, + { + "left": "issue:43611", + "right": "issue:44534", + "accept": false, + "reason": "One is a base_model_prefix loading regression and the other is non-persistent buffers being initialized with junk; unrelated mechanisms." + }, + { + "left": "issue:42915", + "right": "issue:44589", + "accept": false, + "reason": "Both touch float8/quantization-adjacent failures, but one is Qwen3MoE with FineGrainedFP8Config while the other is a storage-type TypeError; not the same bug." + }, + { + "left": "issue:43421", + "right": "issue:44568", + "accept": false, + "reason": "Both concern special tokens, but one is runtime post-processor syncing and the other is add_special_tokens not adding BOS/EOS for a specific tokenizer; distinct issues." 
+ }, + { + "left": "issue:36032", + "right": "issue:43742", + "accept": false, + "reason": "Tokenizer method-name conflict vs key error loading a different model; no shared underlying code path." + }, + { + "left": "issue:43632", + "right": "issue:44534", + "accept": false, + "reason": "_is_hf_initialized flag regression and junk-filled buffers are separate v5 breakages with different root causes." + }, + { + "left": "issue:40444", + "right": "issue:45325", + "accept": false, + "reason": "Both are Qwen2.5-VL, but one is finetuning with multiple images per prompt and the other is rope-index scaling for still-image positions; different bugs." + }, + { + "left": "issue:43818", + "right": "issue:45357", + "accept": false, + "reason": "Different models and different failures: Video-LLaVA video tower initialization vs Qwen3.5 save_pretrained visual encoder keys." + }, + { + "left": "issue:43531", + "right": "issue:44877", + "accept": false, + "reason": "Sliding-window behavior in Qwen3-MoE and strict config parsing for granite_speech are unrelated." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 27, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:33666", + "issue:35707", + "issue:36032", + "issue:40444", + "issue:41720", + "issue:43425", + "issue:43792", + "issue:44079", + "issue:44186", + "issue:44265", + "issue:44403", + "issue:44534", + "issue:44589", + "issue:44683", + "issue:44821", + "issue:44991", + "issue:45020", + "issue:45084" + ], + "soft_pairs": [ + "issue:44683|issue:45020", + "issue:44821|issue:45084", + "issue:43425|issue:44403", + "issue:36032|issue:41720", + "issue:43792|issue:44589", + "issue:44186|issue:44265", + "issue:44403|issue:44991", + "issue:33666|issue:40444", + "issue:35707|issue:40444", + "issue:44079|issue:44534" + ], + "prepared_review_unit_hash": "637aa550913fa50ee6c748f41f49cc6d41c20e2bd1881914b19aabc794d02b77", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13176, + "estimated_input_tokens": 3294, + "estimated_eval_tokens": 6844 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is not a true duplicate set. The items cover unrelated failures across tokenizer loading, Qwen/VL finetuning, model output semantics, torch.export, device mapping, and version compatibility. 
The soft links are mostly keyword overlap around Transformers, not the same bug.", + "confidence": 0.97, + "canonical_issue_reason": "No single issue cleanly represents the cluster; if one must be chosen, #44403 is the broadest open report and the closest to a generic Transformers-loading theme, but it is not a true duplicate hub.", + "canonical_pr_reason": null, + "best_issue_reason": "#44403 is the most general and still-open issue in the set, so it is the least specific representative. The rest are narrower or clearly different failure modes.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44683", + "right": "issue:45020", + "accept": false, + "reason": "Different bugs: compiled flex_attention on torch>=2.9 vs remote_code breakage in recent Transformers." + }, + { + "left": "issue:44821", + "right": "issue:45084", + "accept": false, + "reason": "Unrelated failures: loading AutoImageProcessor from a URL vs compilation of non-template nodes." + }, + { + "left": "issue:43425", + "right": "issue:44403", + "accept": false, + "reason": "Torch 2.10 incompatibility is a version-specific runtime problem; the other is generic load-noise behavior." + }, + { + "left": "issue:36032", + "right": "issue:41720", + "accept": false, + "reason": "Tokenizer metaclass/method conflict and Qwen3 CUDA assert are entirely different code paths." + }, + { + "left": "issue:43792", + "right": "issue:44589", + "accept": false, + "reason": "Whisper model load/runtime failure does not match the Float8 storage lookup error." + }, + { + "left": "issue:44186", + "right": "issue:44265", + "accept": false, + "reason": "LayoutLMv2 tokenizer crashes and torch.export/torch_compilable_check failures are distinct subsystems." + }, + { + "left": "issue:44403", + "right": "issue:44991", + "accept": false, + "reason": "Both mention loading, but one is about noisy logging and the other is a tokenizer compatibility failure in v5." 
+ }, + { + "left": "issue:33666", + "right": "issue:40444", + "accept": false, + "reason": "Both involve Qwen-VL, but one is multi-GPU training while the other is iterable dataset/multi-image finetuning failure." + }, + { + "left": "issue:35707", + "right": "issue:40444", + "accept": false, + "reason": "Progressive generation with inputs_embeds/past_key_values is unrelated to multi-image iterable-dataset finetuning." + }, + { + "left": "issue:44079", + "right": "issue:44534", + "accept": false, + "reason": "ModelOutput key assignment and non-persistent buffer initialization are different internal bugs." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 28, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:41720", + "issue:42175", + "issue:43296", + "issue:43482", + "issue:43525", + "issue:43531", + "issue:43606", + "issue:43644", + "issue:43749", + "issue:43761", + "issue:43819", + "issue:43881", + "issue:44079", + "issue:44265", + "issue:44291", + "issue:44488", + "issue:44589", + "issue:45081" + ], + "soft_pairs": [ + "issue:43482|issue:43525", + "issue:44291|issue:44589", + "issue:43606|issue:44265", + "issue:43606|issue:43819", + "issue:43749|issue:44079", + "issue:43761|issue:44079", + "issue:43644|issue:44079", + "issue:42175|issue:43296", + "issue:43881|issue:44488", + "issue:41720|issue:45081", + "issue:43531|issue:44291" + ], + "prepared_review_unit_hash": "62ba72393751e03510cbea31248f6fa435441ddc0eae52766e334c2f2ad6d83b", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 
168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 11, + "serialized_chars": 13715, + "estimated_input_tokens": 3429, + "estimated_eval_tokens": 7114 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The cluster is mostly a set of unrelated model-loading/regression issues; the soft-similarity links do not indicate the same underlying bug or change. No PRs are present, and none of the issue pairs are good duplicate merges.", + "confidence": 0.94, + "canonical_issue_reason": "Issue 44079 is the best representative if one issue must anchor the set: it describes a core library-level regression in ModelOutput assignment, which is broader than the more model-specific failures here.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 44079 has the broadest scope and the clearest reusable triage value among otherwise unrelated reports, so it is the best single issue to surface.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43482", + "right": "issue:43525", + "accept": false, + "reason": "Different failures and code paths: Qwen2.5-GGUF/transformers-v5 loading vs Llama4Config missing pad_token_id." + }, + { + "left": "issue:44291", + "right": "issue:44589", + "accept": false, + "reason": "Both are TypeErrors, but the causes differ: init_empty_weights/_is_hf_initialized mismatch vs missing Float8 storage type." + }, + { + "left": "issue:43606", + "right": "issue:44265", + "accept": false, + "reason": "CPU-offload device mismatch in bark-small is unrelated to torch.export export failures around torch_compilable_check." + }, + { + "left": "issue:43606", + "right": "issue:43819", + "accept": false, + "reason": "Different model and failure mode: bark-small CPU offload device mismatch vs DAC.from_latents not matching forward due to missing STE." 
+ }, + { + "left": "issue:43749", + "right": "issue:44079", + "accept": false, + "reason": "FSDP CPU RAM efficient loading is a distributed-loading regression; ModelOutput key assignment is a separate container/output bug." + }, + { + "left": "issue:43761", + "right": "issue:44079", + "accept": false, + "reason": "Both touch outputs, but one is CLIPVisionModel hidden_states behavior and the other is generic ModelOutput key assignment; not the same bug." + }, + { + "left": "issue:43644", + "right": "issue:44079", + "accept": false, + "reason": "Non-persistent buffer corruption in Transformers 5.0.0 is a different regression from ModelOutput key handling." + }, + { + "left": "issue:42175", + "right": "issue:43296", + "accept": false, + "reason": "Packaging/backend selection for pip extras vs a PaddleOCR-VL model load failure; unrelated subsystems and fixes." + }, + { + "left": "issue:43881", + "right": "issue:44488", + "accept": false, + "reason": "glm-4v-9b loading failure and cjvt/sleng-bert loading failure are different model-specific issues." + }, + { + "left": "issue:41720", + "right": "issue:45081", + "accept": false, + "reason": "Qwen3 auto device mapping CUDA assert on A800 is unrelated to the Mistral tokenizer regex patch crash." + }, + { + "left": "issue:43531", + "right": "issue:44291", + "accept": false, + "reason": "Qwen3-MoE sliding_window behavior and init_empty_weights argument handling are different bugs with no shared concrete code path." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 29, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:40444", + "issue:42915", + "issue:43454", + "issue:43482", + "issue:43493", + "issue:43611", + "issue:43632", + "issue:43646", + "issue:43828", + "issue:43994", + "issue:44387", + "issue:44466", + "issue:44610", + "issue:44617", + "issue:44683", + "issue:45072", + "issue:45325", + "issue:45381" + ], + "soft_pairs": [ + "issue:40444|issue:45381", + "issue:44610|issue:45325", + "issue:45325|issue:45381", + "issue:43828|issue:44683", + "issue:44387|issue:44617", + "issue:43611|issue:43646", + "issue:43632|issue:45072", + "issue:42915|issue:43482", + "issue:43493|issue:43994", + "issue:43454|issue:44466" + ], + "prepared_review_unit_hash": "5fa1f30f22e562dc035068a28561475e7d54ec71c6b9b7743a84a6afe4108c5c", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13399, + "estimated_input_tokens": 3350, + "estimated_eval_tokens": 6956 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "Mostly a heterogeneous cluster with a few real duplicate-like pairs. 
The clearest duplicate subclusters are Qwen2.5-VL position-id/rope-index bugs and a SigLIP2 output-discrepancy pair; most other soft links are only thematically related, not the same bug.", + "confidence": 0.79, + "canonical_issue_reason": "No single canonical for the entire set, but issue 45325 is the best anchor for the strongest subcluster because it states the concrete Qwen2.5-VL rope-index/position_id defect most directly.", + "canonical_pr_reason": null, + "best_issue_reason": "45325 is the most actionable representative: it describes the underlying positional-indexing bug clearly, and the related reports are downstream manifestations in similar Qwen2.5-VL image/video paths.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:40444", + "right": "issue:45381", + "accept": false, + "reason": "Both are Qwen2.5-VL multimodal failures, but one is IterableDataset multi-image training breakage and the other is video vision_position_ids; too different to be the same bug." + }, + { + "left": "issue:44610", + "right": "issue:45325", + "accept": false, + "reason": "Different models and different failures: OmDet-Turbo processor size mismatch vs Qwen2.5-VL rope-index scaling." + }, + { + "left": "issue:45325", + "right": "issue:45381", + "accept": true, + "reason": "Same concrete Qwen2.5-VL positional-indexing path: still-image temporal position_ids vs video vision_position_ids both point to the same rope-index bug." + }, + { + "left": "issue:43828", + "right": "issue:44683", + "accept": false, + "reason": "Unrelated failures: autocast dtype mismatch in Phi-tiny-MoE vs torch>=2.9 flex_attention compilation breakage." + }, + { + "left": "issue:44387", + "right": "issue:44617", + "accept": false, + "reason": "Both mention OOM, but the causes are unrelated: int4 reserved-memory growth vs Sam3Video memory use." 
+ }, + { + "left": "issue:43611", + "right": "issue:43646", + "accept": false, + "reason": "Both are Transformers v5 loading/init regressions, but they affect different mechanisms: base_model_prefix loading vs custom model initialization." + }, + { + "left": "issue:43632", + "right": "issue:45072", + "accept": false, + "reason": "Different bugs in different code paths: _is_hf_initialized flag handling vs dtype mismatch in SwitchTransformers/TimmWrapperModel." + }, + { + "left": "issue:42915", + "right": "issue:43482", + "accept": false, + "reason": "Different models and different breakages: FineGrainedFP8Config on Qwen3Moe vs Qwen2.5-GGUF loading failure in v5." + }, + { + "left": "issue:43493", + "right": "issue:43994", + "accept": true, + "reason": "Same underlying SigLIP2 implementation problem: HF output deviates from the original/JAX behavior and produces nonsensical downstream results." + }, + { + "left": "issue:43454", + "right": "issue:44466", + "accept": false, + "reason": "Both involve lm_head/tied weights, but one is a model-specific weight-tying failure and the other is a broader device-dependent serialization inconsistency; not the same bug." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 30, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:34634", + "issue:35707", + "issue:43262", + "issue:43317", + "issue:43526", + "issue:43746", + "issue:43792", + "issue:43873", + "issue:43881", + "issue:43883", + "issue:43957", + "issue:44186", + "issue:44291", + "issue:44877", + "issue:44898", + "issue:44991", + "issue:45072", + "issue:45325" + ], + "soft_pairs": [ + "issue:43792|issue:43957", + "issue:43526|issue:43883", + "issue:44186|issue:45072", + "issue:43881|issue:44991", + "issue:43317|issue:43873", + "issue:43262|issue:43746", + "issue:44898|issue:45325", + "issue:44291|issue:44877", + "issue:34634|issue:35707" + ], + "prepared_review_unit_hash": "9b682366704ceb8192506bdb9d8ee3af7ee027509f3824eff87ea1681a95a546", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 9, + "serialized_chars": 13230, + "estimated_input_tokens": 3308, + "estimated_eval_tokens": 6872 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These issues span unrelated bugs across audio, tokenization, vision, model loading, quantization/offload, and config handling. 
None of the soft pairs look like true duplicates; at most a few are in the same broad area, but not the same concrete bug.", + "confidence": 0.93, + "canonical_issue_reason": "No single canonical duplicate stands out; if a representative is needed, issue 43873 is the broadest and most central model-loading/offloading report in the set.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 43873 is the best overall representative because it is the most generic and widely applicable report here, centered on offloading/quantization rather than a model-specific edge case.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43792", + "right": "issue:43957", + "accept": false, + "reason": "Both are loading-related, but they concern different models and different failure modes (Whisper runtime failure vs meta-device loading breakage)." + }, + { + "left": "issue:43526", + "right": "issue:43883", + "accept": false, + "reason": "These are unrelated bugs in different components: image processor label reduction vs a model attribute error during loading." + }, + { + "left": "issue:44186", + "right": "issue:45072", + "accept": false, + "reason": "Tokenizer crash on NER/padding is unrelated to dtype mismatches in bfloat16 inference." + }, + { + "left": "issue:43881", + "right": "issue:44991", + "accept": false, + "reason": "Both involve loading failures, but one is a model load failure and the other is a tokenizer load failure with different likely causes." + }, + { + "left": "issue:43317", + "right": "issue:43873", + "accept": false, + "reason": "Both mention offloading/quantization, but the titles point to different concrete failures: dequantized model loading with device_map=auto vs general offloading behavior with quantization." + }, + { + "left": "issue:43262", + "right": "issue:43746", + "accept": false, + "reason": "Audio processor chat-template sampling-rate defaults are unrelated to GraniteSpeech PEFT checkpoint loading." 
+ }, + { + "left": "issue:44898", + "right": "issue:45325", + "accept": false, + "reason": "Both touch vision position handling, but they are different code paths and models: Perceiver interpolation vs Qwen2.5-VL rope/temporal indexing." + }, + { + "left": "issue:44291", + "right": "issue:44877", + "accept": false, + "reason": "One is an init_empty_weights argument incompatibility; the other is strict config validation blocking granite_speech config loading." + }, + { + "left": "issue:34634", + "right": "issue:35707", + "accept": false, + "reason": "BarkProcessor voice_preset and progressive generation with inputs_embeds/past_key_values are unrelated bugs." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 31, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:35707", + "issue:39692", + "issue:40444", + "issue:41720", + "issue:43296", + "issue:43377", + "issue:43550", + "issue:43632", + "issue:43720", + "issue:43749", + "issue:43792", + "issue:44164", + "issue:44534", + "issue:44610", + "issue:44683", + "issue:44877", + "issue:44898", + "issue:44991" + ], + "soft_pairs": [ + "issue:43550|issue:43720", + "issue:43632|issue:44164", + "issue:35707|issue:41720", + "issue:44610|issue:44898", + "issue:43296|issue:44877", + "issue:43296|issue:44991", + "issue:43377|issue:44610", + "issue:43792|issue:44683", + "issue:43377|issue:44898", + "issue:39692|issue:40444", + "issue:43749|issue:44534" + ], + "prepared_review_unit_hash": "787130400ce0fe913bd9b6b444a63ac537184620f028900fcf0f15ec01ace30d", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + 
"serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 11, + "serialized_chars": 13609, + "estimated_input_tokens": 3403, + "estimated_eval_tokens": 7062 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is heterogeneous: the issue titles point to different bugs/regressions across unrelated models, loading paths, and runtime features. None of the soft pairs looks like the same concrete underlying defect, so all soft matches should be rejected.", + "confidence": 0.94, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43550", + "right": "issue:43720", + "accept": false, + "reason": "Both involve model/runtime failures, but one is a torch.compile+SDPA bug in Bamba-9B-v2 and the other is a packed-weights loading bug in BitNet AutoBitLinear; different code paths and likely different fixes." + }, + { + "left": "issue:43632", + "right": "issue:44164", + "accept": false, + "reason": "Both relate to loading/saving behavior, but one is about the `_is_hf_initialized` flag in Transformers v5 and the other is about preserving `extra_state`; these are distinct serialization issues." + }, + { + "left": "issue:35707", + "right": "issue:41720", + "accept": false, + "reason": "Progressive generation with `inputs_embeds`/`past_key_values` is unrelated to a Qwen3 auto device mapping cuda assert; different subsystems and failure modes." + }, + { + "left": "issue:44610", + "right": "issue:44898", + "accept": false, + "reason": "Both are vision-model shape/processor complaints, but one is an OmDet-Turbo processor/model resolution mismatch and the other is a Perceiver interpolation failure with non-default image size; not the same bug." 
+ }, + { + "left": "issue:43296", + "right": "issue:44877", + "accept": false, + "reason": "Both mention loading/config compatibility in v5-era Transformers, but one is PaddleOCR-VL loading in vLLM and the other is strict config handling for granite_speech; separate model-specific breakages." + }, + { + "left": "issue:43296", + "right": "issue:44991", + "accept": false, + "reason": "These are different loading failures: PaddleOCR-VL in vLLM versus tokenizer loading for EMBEDDIA/est-roberta. Shared version context is not enough to merge them." + }, + { + "left": "issue:43377", + "right": "issue:44610", + "accept": false, + "reason": "MIMI padding-mask batch inconsistency and OmDet-Turbo processor/model size mismatch are different bug classes affecting different components." + }, + { + "left": "issue:43792", + "right": "issue:44683", + "accept": false, + "reason": "Whisper-large-v2 runtime failure and compiled flex_attention failing on torch>=2.9 are unrelated; one is model execution, the other is an attention/compiler compatibility regression." + }, + { + "left": "issue:43377", + "right": "issue:44898", + "accept": false, + "reason": "Batch-vs-single output drift from missing padding mask support is not the same as Perceiver failing on non-default resolution despite interpolation; separate models and root causes." + }, + { + "left": "issue:39692", + "right": "issue:40444", + "accept": false, + "reason": "SigLIP2 doc example errors and Qwen2.5-VL finetuning with multiple images per prompt are unrelated documentation/example versus training pipeline issues." + }, + { + "left": "issue:43749", + "right": "issue:44534", + "accept": false, + "reason": "Both concern Transformers v5 loading/state behavior, but FSDP CPU RAM efficient loading and non-persistent buffers filled with junk are different mechanisms and fixes." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 32, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:34567", + "issue:40990", + "issue:41720", + "issue:43322", + "issue:43404", + "issue:43454", + "issue:43526", + "issue:43550", + "issue:43606", + "issue:43881", + "issue:44186", + "issue:44423", + "issue:44466", + "issue:44610", + "issue:44977", + "issue:45072", + "issue:45357", + "issue:45406" + ], + "soft_pairs": [ + "issue:43550|issue:43606", + "issue:44466|issue:45357", + "issue:43454|issue:45072", + "issue:43454|issue:43550", + "issue:44423|issue:45406", + "issue:41720|issue:43322", + "issue:44186|issue:44610", + "issue:41720|issue:44977", + "issue:43404|issue:45357", + "issue:43526|issue:43881", + "issue:34567|issue:40990" + ], + "prepared_review_unit_hash": "830d69ffe544fb93dab37dd06829e32623d15b9a61a3ff9de0072a2f8052e662", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 11, + "serialized_chars": 13840, + "estimated_input_tokens": 3460, + "estimated_eval_tokens": 7176 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is very heterogeneous: the issues share only loose themes like multimodal/model-loading/training bugs, but they appear to describe distinct code paths and failures. 
I would not merge any of the soft pairs.", + "confidence": 0.89, + "canonical_issue_reason": "issue:44466 is the strongest representative because it is a concrete regression with a clear, well-scoped serialization/tied-weights failure and broad practical impact.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:44466 has the cleanest bug definition and most actionable reproduction surface among the set.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43550", + "right": "issue:43606", + "accept": false, + "reason": "Different failures and mechanisms: torch.compile/SDPA vs CPU offload device mismatch; same broad CI/model area only." + }, + { + "left": "issue:44466", + "right": "issue:45357", + "accept": false, + "reason": "Both involve save/pretrained behavior, but one is tied-weight serialization and the other is incorrect visual encoder keys in Qwen3.5; not the same bug." + }, + { + "left": "issue:43454", + "right": "issue:45072", + "accept": false, + "reason": "Different model families and symptoms: lm_head tying bug vs bfloat16 dtype mismatch in inference." + }, + { + "left": "issue:43454", + "right": "issue:43550", + "accept": false, + "reason": "Unrelated concrete problems: weight tying in a multimodal model vs compile/SDPA failure in Bamba." + }, + { + "left": "issue:44423", + "right": "issue:45406", + "accept": false, + "reason": "Both are serve crashes, but the error locations differ (`str.to` vs missing `_tokenizer`) and point to different processor bugs." + }, + { + "left": "issue:41720", + "right": "issue:43322", + "accept": false, + "reason": "Different models and failure modes: cudaErrorAssert during auto device mapping vs segmentation fault when loading Llava Next." + }, + { + "left": "issue:44186", + "right": "issue:44610", + "accept": false, + "reason": "Different components: tokenizer crashes on NER/padding vs processor image-size mismatch for a model." 
+ }, + { + "left": "issue:41720", + "right": "issue:44977", + "accept": false, + "reason": "Same model family only; one is an auto-device-map loading/assert issue, the other is flash-attention generation quality/behavior." + }, + { + "left": "issue:43404", + "right": "issue:45357", + "accept": false, + "reason": "Both mention image-text models, but one is an lm_head tying bug and the other is a save_pretrained key regression." + }, + { + "left": "issue:43526", + "right": "issue:43881", + "accept": false, + "reason": "Completely different failures: BeitImageProcessorFast label reduction vs glm-4v-9b loading failure." + }, + { + "left": "issue:34567", + "right": "issue:40990", + "accept": false, + "reason": "No meaningful overlap: TrainerState token counter bug vs perplexity on a specific model/dataset." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 33, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:42617", + "issue:42915", + "issue:43377", + "issue:43454", + "issue:43577", + "issue:43606", + "issue:43708", + "issue:43720", + "issue:43873", + "issue:44186", + "issue:44361", + "issue:44479", + "issue:44756", + "issue:44811", + "issue:44877", + "issue:44918", + "issue:44991" + ], + "soft_pairs": [ + "issue:43454|issue:43720", + "issue:43873|issue:44756", + "issue:43577|issue:43708", + "issue:43377|issue:44811", + "issue:44186|issue:44361", + "issue:44877|issue:44991", + "issue:42617|issue:42915", + "issue:44479|issue:44918", + "issue:43606|issue:43720" + ], + "prepared_review_unit_hash": "86a0310b5a1e9daabe28213442bcd9ac984a9418d99e5a32f934daf9f229a084", + "component_budget": { 
+ "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12552, + "estimated_input_tokens": 3138, + "estimated_eval_tokens": 6532 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The items are mostly unrelated bug reports across different models, tokenizers, training, and loading paths. None of the soft pairs are strong enough to treat as duplicates; they share broad subsystem language at most, not the same underlying defect.", + "confidence": 0.92, + "canonical_issue_reason": "If one issue must serve as the closest representative, issue #43873 is the broadest and most central-looking open bug report, but the set does not form a single duplicate cluster.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue #43873 is the best standalone canonical candidate because it is an open, broadly described bug around offloading and quantization, which could absorb related follow-ups if any existed.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43454", + "right": "issue:43720", + "accept": false, + "reason": "Different models and failure modes: AyaVision weight tying vs BitNet packed-weight unpacking during accelerate loading." + }, + { + "left": "issue:43873", + "right": "issue:44756", + "accept": false, + "reason": "Both mention offloading/memory, but one is quantization behavior and the other is Strix Halo mmap OOM; different root causes and contexts." + }, + { + "left": "issue:43577", + "right": "issue:43708", + "accept": false, + "reason": "Blip2 dtype loading bug vs Trainer checkpoint-resume max_steps calculation bug; unrelated subsystems." 
+ }, + { + "left": "issue:43377", + "right": "issue:44811", + "accept": false, + "reason": "MIMI encoder padding-mask batching issue vs Whisper batch_decode skip_special_tokens bug; different components and defects." + }, + { + "left": "issue:44186", + "right": "issue:44361", + "accept": false, + "reason": "Both are tokenizer bugs, but they affect different tokenizers and different crash paths, so they are not the same underlying issue." + }, + { + "left": "issue:44877", + "right": "issue:44991", + "accept": false, + "reason": "Strict config loading for granite_speech vs tokenizer loading for EMBEDDIA/est-roberta; different model/config compatibility problems." + }, + { + "left": "issue:42617", + "right": "issue:42915", + "accept": false, + "reason": "A 3d_parallel.py execution problem and a Qwen3Moe FineGrainedFP8Config failure are distinct issues with no concrete shared code path." + }, + { + "left": "issue:44479", + "right": "issue:44918", + "accept": false, + "reason": "Video-input regression across Qwen VL variants vs Qwen3.5 embedding unpacking with TRL SFT trainer; different features and code paths." + }, + { + "left": "issue:43606", + "right": "issue:43720", + "accept": false, + "reason": "CPU offload device mismatch in bark-small vs packed-weight unpacking for BitNet; both touch loading/offload, but not the same bug." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 34, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:36331", + "issue:42617", + "issue:42915", + "issue:42994", + "issue:43316", + "issue:43317", + "issue:43482", + "issue:43531", + "issue:43550", + "issue:43572", + "issue:43653", + "issue:44292", + "issue:44479", + "issue:44671", + "issue:44743", + "issue:44898", + "issue:44913", + "issue:45072" + ], + "soft_pairs": [ + "issue:42994|issue:43482", + "issue:44671|issue:44743", + "issue:36331|issue:42915", + "issue:42915|issue:43317", + "issue:36331|issue:42617", + "issue:43316|issue:44913", + "issue:43550|issue:43653", + "issue:44898|issue:45072", + "issue:43531|issue:43572", + "issue:44292|issue:44479" + ], + "prepared_review_unit_hash": "f88443c9e59af7916cbc9f1994f51f5a1fdbacd3aa2fce0dd3d0a9a916dce0e3", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13211, + "estimated_input_tokens": 3303, + "estimated_eval_tokens": 6862 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The set is heterogeneous: training API mismatch, quantization/loading issues, config serialization bugs, tokenizer regressions, and several model-specific inference failures. 
None of the soft pairs look like the same underlying bug, so there are no duplicate merges here.", + "confidence": 0.93, + "canonical_issue_reason": "No single canonical duplicate emerges; the issues span distinct bugs across different models and subsystems, and the soft pairs are only superficial text similarity.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 44479 is the strongest standalone representative: it describes a concrete regression, names multiple affected model variants, and is more broadly scoped than the other reports. Still, it does not duplicate the others.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:42994", + "right": "issue:43482", + "accept": false, + "reason": "Both involve quantized/model-format workflows, but one is about saving quantized models and the other about GGUF loading failure in v5; different code paths and failure modes." + }, + { + "left": "issue:44671", + "right": "issue:44743", + "accept": false, + "reason": "Completely different bugs: CamemBERT masked-LM prediction errors versus Qwen3.5 recurrent state reset when using cache." + }, + { + "left": "issue:36331", + "right": "issue:42915", + "accept": false, + "reason": "Training API signature mismatch in CustomTrainer is unrelated to a Qwen3MoE + FineGrainedFP8Config failure." + }, + { + "left": "issue:42915", + "right": "issue:43317", + "accept": false, + "reason": "One is a FineGrainedFP8Config/model failure; the other is a device_map/offload dequantized-model loading issue. Same broad area, different bug." + }, + { + "left": "issue:36331", + "right": "issue:42617", + "accept": false, + "reason": "Unexpected compute_loss keyword argument error is unrelated to running 3d_parallel.py." + }, + { + "left": "issue:43316", + "right": "issue:44913", + "accept": false, + "reason": "Both are config-related, but one is a Gemma3TextConfig API discrepancy and the other is GPTNeoX rotary_pct not persisting on reload." 
+ }, + { + "left": "issue:43550", + "right": "issue:43653", + "accept": false, + "reason": "Bamba torch.compile/SDPA failure and BigBirdTokenizer special-token registration are unrelated subsystems and failure modes." + }, + { + "left": "issue:44898", + "right": "issue:45072", + "accept": false, + "reason": "Perceiver non-default-resolution image classification failure is not the same bug as bf16 dtype mismatches in SwitchTransformers/TimmWrapperModel." + }, + { + "left": "issue:43531", + "right": "issue:43572", + "accept": false, + "reason": "Qwen3-MoE sliding_window behavior and StableLmConfig missing pad_token_idx are distinct model/config issues." + }, + { + "left": "issue:44292", + "right": "issue:44479", + "accept": false, + "reason": "Both mention Qwen-family models, but one is an NVFP4 runtime/loading error and the other is a video-input regression across several VL/MoE models." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 35, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:33357", + "issue:35707", + "issue:36010", + "issue:41720", + "issue:42617", + "issue:42915", + "issue:43322", + "issue:43388", + "issue:43540", + "issue:43606", + "issue:43653", + "issue:43782", + "issue:43873", + "issue:44514", + "issue:44545", + "issue:45059", + "issue:45161" + ], + "soft_pairs": [ + "issue:43606|issue:43653", + "issue:41720|issue:45161", + "issue:41720|issue:42617", + "issue:43388|issue:45059", + "issue:43540|issue:43782", + "issue:42915|issue:43322", + "issue:41720|issue:43873", + "issue:36010|issue:42617", + "issue:44514|issue:44545", + "issue:33357|issue:35707" + ], + 
"prepared_review_unit_hash": "46c19de58603e3e3df57b617f0a6a0d142a1d0aa864a7660db933da8ee1218f9", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 10, + "serialized_chars": 12754, + "estimated_input_tokens": 3189, + "estimated_eval_tokens": 6634 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The cluster is mostly heterogeneous: it contains several unrelated model/runtime bugs across different subsystems. The only clear duplicate pair is the Qwen2_5_VLProcessor batched padding=False crash (issues 44514/44545).", + "confidence": 0.91, + "canonical_issue_reason": "issue:44514 is the strongest canonical candidate for the only true duplicate pair: it is earlier, more detailed, and already has inbound references, while issue:44545 is a near-identical follow-up report.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:44514 is the best representative issue in this set because it anchors the only confirmed duplicate pair and has the richer report. The rest of the items are too unrelated to serve as a single cluster representative.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43606", + "right": "issue:43653", + "accept": false, + "reason": "Different bugs in different components: CPU offload device mismatch for suno/bark-small vs BigBirdTokenizer special-token decode behavior." + }, + { + "left": "issue:41720", + "right": "issue:45161", + "accept": false, + "reason": "Both involve model parallelism, but the concrete failures and models differ: cudaErrorAssert on Qwen3 auto device mapping vs GPT-OSS MoE TP not working." 
+ }, + { + "left": "issue:41720", + "right": "issue:42617", + "accept": false, + "reason": "Different models and failure modes; Qwen3 A800 device-map assert is not the same as failing to run 3d_parallel.py." + }, + { + "left": "issue:43388", + "right": "issue:45059", + "accept": false, + "reason": "Unrelated areas: gather_for_metrics label truncation vs SAM3 PCS text/bounding-box behavior." + }, + { + "left": "issue:43540", + "right": "issue:43782", + "accept": false, + "reason": "Both are Qwen3-family reports, but one is video-input validation in Qwen3OmniMoe and the other is a from_pretrained weight_only loading error in Qwen3VL; not the same bug." + }, + { + "left": "issue:42915", + "right": "issue:43322", + "accept": false, + "reason": "Different models and code paths: Qwen3Moe FP8 config failure vs Llava Next segmentation fault on load." + }, + { + "left": "issue:41720", + "right": "issue:43873", + "accept": false, + "reason": "Too broad a similarity around offloading/parallelism; the concrete bugs differ between auto device mapping assert and quantization offloading behavior." + }, + { + "left": "issue:36010", + "right": "issue:42617", + "accept": false, + "reason": "No duplicate signal beyond being general runtime issues; ImportError for GenerationMixin is unrelated to 3d_parallel.py." + }, + { + "left": "issue:44514", + "right": "issue:44545", + "accept": true, + "reason": "Near-verbatim titles describing the same crash in Qwen2_5_VLProcessor.apply_chat_template on batched input with padding=False." + }, + { + "left": "issue:33357", + "right": "issue:35707", + "accept": false, + "reason": "Completely different problems: MacOS bus error with pretrained community CLIP vs progressive generation with inputs_embeds and past_key_values." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 36, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:30064", + "issue:34634", + "issue:41720", + "issue:43381", + "issue:43606", + "issue:43653", + "issue:43720", + "issue:43746", + "issue:43782", + "issue:43844", + "issue:44368", + "issue:44451", + "issue:44492", + "issue:44509", + "issue:44991", + "issue:45072", + "issue:45357", + "issue:45381" + ], + "soft_pairs": [ + "issue:30064|issue:34634", + "issue:41720|issue:43746", + "issue:44492|issue:44509", + "issue:43653|issue:43720", + "issue:44368|issue:45357", + "issue:43381|issue:43844", + "issue:43606|issue:45072", + "issue:44451|issue:44991", + "issue:43782|issue:45381" + ], + "prepared_review_unit_hash": "34e9c597d835c1259f2f5e9c52bb5128c885bd87ef684dbed0efd0d1584325e8", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 9, + "serialized_chars": 13226, + "estimated_input_tokens": 3307, + "estimated_eval_tokens": 6870 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is not a duplicate set: the issues span unrelated models and failure modes (segmentation maps, Bark voice presets, Qwen device mapping, tokenizer registration, PEFT/local checkpoint loading, docs/typos, etc.). 
No soft pair looks like the same underlying bug or change, so there is no clear canonical issue to anchor the cluster.", + "confidence": 0.97, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:30064", + "right": "issue:34634", + "accept": false, + "reason": "Different processors and bugs: image segmentation-map handling vs BarkProcessor voice_preset behavior." + }, + { + "left": "issue:41720", + "right": "issue:43746", + "accept": false, + "reason": "Different failure paths: Qwen3 auto device mapping CUDA assert vs GraniteSpeech PEFT adapter local checkpoint loading." + }, + { + "left": "issue:44492", + "right": "issue:44509", + "accept": false, + "reason": "One is a typo in cache strategy text; the other is stale docs for removed pipeline tasks." + }, + { + "left": "issue:43653", + "right": "issue:43720", + "accept": false, + "reason": "Unrelated components: BigBirdTokenizer special-token registration vs BitNet packed-weight unpacking during accelerate load." + }, + { + "left": "issue:44368", + "right": "issue:45357", + "accept": false, + "reason": "Both mention Qwen3.5, but one is a training-time warning about tie_word_embeddings and the other is a save_pretrained regression for visual encoder keys." + }, + { + "left": "issue:43381", + "right": "issue:43844", + "accept": false, + "reason": "Both involve gradients, but one is eval-mode checkpointing misuse and the other is ZeRO-3 training instability; not the same bug." + }, + { + "left": "issue:43606", + "right": "issue:45072", + "accept": false, + "reason": "Both are mismatch bugs, but they affect different models/code paths and different tensor properties (device offload vs bfloat16 dtype)." + }, + { + "left": "issue:44451", + "right": "issue:44991", + "accept": false, + "reason": "Both are tokenizer-loading failures, but for different models and likely different root causes." 
+ }, + { + "left": "issue:43782", + "right": "issue:45381", + "accept": false, + "reason": "Different Qwen VL problems: from_pretrained weight_only error vs incorrect vision_position_ids for video input." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 37, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:34567", + "issue:34634", + "issue:35707", + "issue:41720", + "issue:42886", + "issue:43421", + "issue:43550", + "issue:43720", + "issue:43819", + "issue:43927", + "issue:43994", + "issue:44062", + "issue:44361", + "issue:44451", + "issue:44671", + "issue:44843", + "issue:44918", + "issue:44936" + ], + "soft_pairs": [ + "issue:44918|issue:44936", + "issue:44062|issue:44451", + "issue:34634|issue:41720", + "issue:43550|issue:44361", + "issue:43550|issue:43927", + "issue:42886|issue:44843", + "issue:34567|issue:35707", + "issue:43421|issue:43927", + "issue:43720|issue:43819", + "issue:43994|issue:44671" + ], + "prepared_review_unit_hash": "ed5dae8ab1f6187b9df34b90b09d521729900bbc42d14a0b8a4705afcda8af70", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13366, + "estimated_input_tokens": 3342, + "estimated_eval_tokens": 6940 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "One soft match stands out: the two offline-tokenizer reports 
appear to describe the same HF_HUB_OFFLINE/cache-loading failure. The rest are different model-, tokenizer-, or trainer-specific bugs that should remain separate.", + "confidence": 0.77, + "canonical_issue_reason": "issue:44843 is the strongest canonical issue: it states the offline failure clearly, names the exact code path (_patch_mistral_regex), and explains the likely root cause (unconditional model_info() call).", + "canonical_pr_reason": null, + "best_issue_reason": "issue:44843 is the best overall issue for triage because it is the most specific and actionable report of the offline tokenizer regression, with a clear reproduction context and probable fix location.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44918", + "right": "issue:44936", + "accept": false, + "reason": "Both involve trainer behavior, but one is about unpacking Qwen3.5 input embeddings in TRL SFT and the other is a generic train/evaluate failure; different bugs." + }, + { + "left": "issue:44062", + "right": "issue:44451", + "accept": false, + "reason": "Both are tokenizer/loading-related at a high level, but one is an AddedToken constructor error and the other is a model load failure for ScandiBERT; not the same code path." + }, + { + "left": "issue:34634", + "right": "issue:41720", + "accept": false, + "reason": "Unrelated subsystems: BarkProcessor voice_preset vs Qwen3 auto device mapping cudaErrorAssert." + }, + { + "left": "issue:43550", + "right": "issue:44361", + "accept": false, + "reason": "Different bugs and components: torch.compile/SDPA on Bamba versus an MLukeTokenizer AttributeError." + }, + { + "left": "issue:43550", + "right": "issue:43927", + "accept": false, + "reason": "One is a compile-time/model execution issue; the other is config persistence losing custom token IDs. Too different to merge." 
+ }, + { + "left": "issue:42886", + "right": "issue:44843", + "accept": true, + "reason": "Same underlying offline tokenizer-loading regression: both report HF_HUB_OFFLINE/cache failure, and 44843 identifies the concrete culprit in _patch_mistral_regex/model_info()." + }, + { + "left": "issue:34567", + "right": "issue:35707", + "accept": false, + "reason": "TrainerState token-count tracking and progressive generation with inputs_embeds/past_key_values are unrelated bugs." + }, + { + "left": "issue:43421", + "right": "issue:43927", + "accept": false, + "reason": "Both touch tokenizer metadata, but one is runtime post-processor refresh and the other is save/load loss of custom token IDs causing IndexError; different failures." + }, + { + "left": "issue:43720", + "right": "issue:43819", + "accept": false, + "reason": "Distinct model-specific issues: BitNet accelerate loading unpacking versus DAC.from_latents mismatch with missing STE." + }, + { + "left": "issue:43994", + "right": "issue:44671", + "accept": false, + "reason": "Both report incorrect outputs, but on different models and pipelines; no evidence they share the same root cause." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 38, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:39692", + "issue:43421", + "issue:43525", + "issue:43531", + "issue:43596", + "issue:43653", + "issue:44077", + "issue:44186", + "issue:44220", + "issue:44589", + "issue:44610", + "issue:44625", + "issue:44898", + "issue:44987", + "issue:45042", + "issue:45072", + "issue:45081", + "issue:45356" + ], + "soft_pairs": [ + "issue:43531|issue:44589", + "issue:44987|issue:45042", + "issue:44077|issue:44625", + "issue:43421|issue:43653", + "issue:43421|issue:44186", + "issue:44186|issue:44898", + "issue:45081|issue:45356", + "issue:39692|issue:44610", + "issue:44610|issue:45072", + "issue:43596|issue:44220", + "issue:43525|issue:43531" + ], + "prepared_review_unit_hash": "f49199be0777d5dd49a152fdb09a4eda9a1180194493a80973c34039641cef33", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 11, + "serialized_chars": 13883, + "estimated_input_tokens": 3471, + "estimated_eval_tokens": 7198 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These items are mostly a grab-bag of unrelated bugs across tokenizers, model configs, image processors, and inference/runtime issues. 
None of the soft pairs look like the same underlying bug strongly enough to merge.", + "confidence": 0.93, + "canonical_issue_reason": "issue:45081 is the strongest single representative here: it describes a concrete, reproducible tokenizer-loading crash with a clearly named failing code path (`_patch_mistral_regex`) and active user impact.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:45081 is the best overall anchor because it is specific, actionable, and tied to a distinct failure path that could be fixed directly; several other issues are broader or more model-specific.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43531", + "right": "issue:44589", + "accept": false, + "reason": "Different subsystems and failures: Qwen3-MoE sliding-window behavior vs a Float8 storage lookup error. No shared code path." + }, + { + "left": "issue:44987", + "right": "issue:45042", + "accept": false, + "reason": "Both involve loading/inference regressions, but one is a model-loading failure for `physical-intelligence/fast` and the other is an image-processor dependency regression requiring torchvision. Not the same bug." + }, + { + "left": "issue:44077", + "right": "issue:44625", + "accept": false, + "reason": "`post_init` allowance for patchtsmixer is unrelated to Qwen3.5 label propagation between configs." + }, + { + "left": "issue:43421", + "right": "issue:43653", + "accept": false, + "reason": "Both mention tokenizer special-token handling, but one is a backend post-processor update bug at runtime while the other is a BigBirdTokenizer mask-token registration issue. Different concrete defects." + }, + { + "left": "issue:43421", + "right": "issue:44186", + "accept": false, + "reason": "Different tokenizer failures: runtime special-token refresh vs LayoutLMv2 NER/batched padding crashes." 
+ }, + { + "left": "issue:44186", + "right": "issue:44898", + "accept": false, + "reason": "LayoutLMv2 tokenizer padding/truncation crashes and Perceiver interpolation at non-default resolution are unrelated model-specific bugs." + }, + { + "left": "issue:45081", + "right": "issue:45356", + "accept": false, + "reason": "Both touch Mistral-related tokenizer handling, but one is a crash in `_patch_mistral_regex` and the other is a Kimi-K2.5 regression about codec handling and a misleading warning. Related area, not the same bug." + }, + { + "left": "issue:39692", + "right": "issue:44610", + "accept": false, + "reason": "SigLIP2 docs/quantization example issues are unrelated to OmDet-Turbo processor/model resolution mismatch." + }, + { + "left": "issue:44610", + "right": "issue:45072", + "accept": false, + "reason": "One is an image processor output-size mismatch; the other is a dtype mismatch in bfloat16 inference. Different failure modes." + }, + { + "left": "issue:43596", + "right": "issue:44220", + "accept": false, + "reason": "DeepSpeed ZeRO-3/BertModel initialization crash is unrelated to `_torch_extract_fbank_features()`." + }, + { + "left": "issue:43525", + "right": "issue:43531", + "accept": false, + "reason": "Llama4 `pad_token_id` config error is unrelated to Qwen3-MoE sliding-window behavior." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 39, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:39692", + "issue:40444", + "issue:43540", + "issue:43550", + "issue:43653", + "issue:43716", + "issue:43746", + "issue:43819", + "issue:43873", + "issue:44008", + "issue:44186", + "issue:44220", + "issue:44442", + "issue:44661", + "issue:44843", + "issue:45081", + "issue:45381" + ], + "soft_pairs": [ + "issue:43746|issue:45381", + "issue:43540|issue:44008", + "issue:43653|issue:43819", + "issue:44220|issue:44661", + "issue:44843|issue:45081", + "issue:39692|issue:43873", + "issue:43716|issue:44008", + "issue:43550|issue:43819", + "issue:40444|issue:43746", + "issue:44186|issue:44442", + "issue:43746|issue:43873" + ], + "prepared_review_unit_hash": "cf37cdcd194c3f0f4af30db87c7cb5e8074ffba286023bfb797a297ce5db40f1", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 11, + "serialized_chars": 13337, + "estimated_input_tokens": 3335, + "estimated_eval_tokens": 6926 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This set does not look like one duplicate cluster; it is a collection of unrelated bug reports across tokenizers, multimodal models, quantization/offloading, and preprocessing. 
None of the soft pairs appear to be the same underlying bug or change.", + "confidence": 0.86, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": "issue:43873 is the broadest, still-open report with multiple inbound references and active discussion, so it is the most useful anchor among otherwise unrelated issues.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43746", + "right": "issue:45381", + "accept": false, + "reason": "Different models and failure modes: GraniteSpeech PEFT local-checkpoint loading vs Qwen2.5-VL video vision_position_ids." + }, + { + "left": "issue:43540", + "right": "issue:44008", + "accept": false, + "reason": "Different subsystems and bugs: Qwen3OmniMoe video-input validation vs Gemma3n audio_mel_mask name collision." + }, + { + "left": "issue:43653", + "right": "issue:43819", + "accept": false, + "reason": "Unrelated tokenizer special-token bug vs DAC latent/STE forward mismatch." + }, + { + "left": "issue:44220", + "right": "issue:44661", + "accept": false, + "reason": "Different problems: fbank feature extraction issue vs add-new-model-like failing inside TOKENIZER_MAPPING_NAMES." + }, + { + "left": "issue:44843", + "right": "issue:45081", + "accept": false, + "reason": "Same area (_patch_mistral_regex) but distinct concrete failures: offline model_info call vs backend_tokenizer AttributeError." + }, + { + "left": "issue:39692", + "right": "issue:43873", + "accept": false, + "reason": "Both mention quantization, but one is a SigLIP2 docs example mismatch/failure and the other is a general offloading+quantization behavior issue." + }, + { + "left": "issue:43716", + "right": "issue:44008", + "accept": false, + "reason": "Different multimodal regressions: Mistral-3 image dtype mismatch vs Gemma3n audio tensor attribute collision." 
+ }, + { + "left": "issue:43550", + "right": "issue:43819", + "accept": false, + "reason": "Different code paths and model families: torch.compile+SDPA in Bamba vs DAC from_latents/STE mismatch." + }, + { + "left": "issue:40444", + "right": "issue:43746", + "accept": false, + "reason": "Different models and bugs: Qwen2.5-VL multi-image iterable dataset finetuning vs GraniteSpeech adapter loading from local checkpoints." + }, + { + "left": "issue:44186", + "right": "issue:44442", + "accept": false, + "reason": "Both tokenizer-related, but the failures are unrelated: LayoutLMv2 NER/padding crashes vs AutoTokenizer loading FastSpeech2ConformerTokenizer." + }, + { + "left": "issue:43746", + "right": "issue:43873", + "accept": false, + "reason": "Shared theme of model loading/quantization only; the concrete bugs are unrelated (PEFT local checkpoints vs offloading with quantization)." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 40, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:30064", + "issue:36010", + "issue:43335", + "issue:43421", + "issue:43540", + "issue:43550", + "issue:43577", + "issue:43618", + "issue:43697", + "issue:43720", + "issue:43756", + "issue:43818", + "issue:44062", + "issue:44488", + "issue:44811", + "issue:45072", + "issue:45381" + ], + "soft_pairs": [ + "issue:43618|issue:43697", + "issue:43335|issue:43756", + "issue:44062|issue:44488", + "issue:43720|issue:45072", + "issue:43550|issue:45072", + "issue:30064|issue:36010", + "issue:43421|issue:44811", + "issue:43540|issue:45381", + "issue:43577|issue:43818" + ], + "prepared_review_unit_hash": 
"8a9370021c36b95c0be5cad88900b6235d7b4774351a64b729d298a3006ab112", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12676, + "estimated_input_tokens": 3169, + "estimated_eval_tokens": 6594 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is a false-positive mix of unrelated transformer bugs across image/video processing, tokenization, dtype/config handling, and model-specific regressions. The soft pairs are mostly lexical matches, not the same underlying defect.", + "confidence": 0.96, + "canonical_issue_reason": "No single canonical issue fits: the items describe distinct bugs in different subsystems and model families.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:43618 is the most self-contained and clearly stated, but it is not representative of the rest of the cluster.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43618", + "right": "issue:43697", + "accept": false, + "reason": "Different regressions: CLIPOutput attentions field vs RTDetrV2 output changes in v5." + }, + { + "left": "issue:43335", + "right": "issue:43756", + "accept": false, + "reason": "Unrelated bugs: SwitchTransformers sparse-layer config vs Smollm3 RoPE layer loss." + }, + { + "left": "issue:44062", + "right": "issue:44488", + "accept": false, + "reason": "Both tokenization-related, but one is an AddedToken constructor error and the other is a model-loading failure; not the same bug." + }, + { + "left": "issue:43720", + "right": "issue:45072", + "accept": false, + "reason": "Different code paths: BitNet packed-weight loading vs dtype mismatches in SwitchTransformers/TimmWrapperModel." 
+ }, + { + "left": "issue:43550", + "right": "issue:45072", + "accept": false, + "reason": "Both mention runtime/inference issues, but Bamba torch.compile SDPA failure is unrelated to dtype mismatch bugs." + }, + { + "left": "issue:30064", + "right": "issue:36010", + "accept": false, + "reason": "Completely different domains: image segmentation map processing vs import error for GenerationMixin." + }, + { + "left": "issue:43421", + "right": "issue:44811", + "accept": false, + "reason": "Both involve tokenizer/processor behavior, but one is runtime special-token post-processing and the other is Whisper batch_decode skip_special_tokens handling." + }, + { + "left": "issue:43540", + "right": "issue:45381", + "accept": false, + "reason": "Both are video-input bugs, but Qwen3OmniMoe video processing error and qwen2.5-vl vision_position_ids mismatch are distinct failures." + }, + { + "left": "issue:43577", + "right": "issue:43818", + "accept": false, + "reason": "Different multimodal dtype/initialization bugs: Blip2 dtype propagation vs Video-LLaVA missing temporal attention and near-duplicate towers." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 41, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:43531", + "issue:43577", + "issue:43582", + "issue:43643", + "issue:43644", + "issue:43749", + "issue:43782", + "issue:43825", + "issue:43994", + "issue:44448", + "issue:44561", + "issue:44671", + "issue:44877", + "issue:44977", + "issue:45020", + "issue:45081", + "issue:45406" + ], + "soft_pairs": [ + "issue:44561|issue:45020", + "issue:45081|issue:45406", + "issue:43643|issue:44877", + "issue:44448|issue:44671", + "issue:43782|issue:44977", + "issue:43531|issue:43643", + "issue:43577|issue:43582", + "issue:43644|issue:43749", + "issue:43825|issue:43994" + ], + "prepared_review_unit_hash": "ecf218ba833bfa178478c81c357d39c1cf7a3ec59d04dd3ac3f1e964ecde21a4", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12593, + "estimated_input_tokens": 3149, + "estimated_eval_tokens": 6554 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly a set of unrelated issue reports that only share broad Transformers/v5 or model-family wording. 
I do not see any true duplicate pairs among the soft candidates; the closest semantic overlap is the remote_code regression area, but even that looks like different failures.", + "confidence": 0.87, + "canonical_issue_reason": "issue #45020 is the broadest/highest-level report in the set and the closest thing to a cluster umbrella, but the items here are not true duplicates of one another.", + "canonical_pr_reason": null, + "best_issue_reason": "issue #45020 is the most representative and general issue in this group because it describes a broad class of recent v5 remote_code regressions rather than a single model-specific symptom.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44561", + "right": "issue:45020", + "accept": false, + "reason": "Both mention remote_code/trust_remote_code regressions, but one is specifically about removing is_torch_fx_available and the other is a broader set of recent-version breakages; not clearly the same underlying bug." + }, + { + "left": "issue:45081", + "right": "issue:45406", + "accept": false, + "reason": "Different code paths and failures: Mistral tokenizer regex patching versus Gemma4Processor missing _tokenizer in serve." + }, + { + "left": "issue:43643", + "right": "issue:44877", + "accept": false, + "reason": "Both involve config-loading behavior, but one is missing fields from AutoConfig with trust_remote_code and the other is strict config blocking granite_speech; different bugs." + }, + { + "left": "issue:44448", + "right": "issue:44671", + "accept": false, + "reason": "Different models and symptoms: Pegasus output changes versus CamemBERT masked-LM predictions in v5." + }, + { + "left": "issue:43782", + "right": "issue:44977", + "accept": false, + "reason": "Both are Qwen-related, but one is a from_pretrained weight_only error for Qwen3VL and the other is flash-attention generation issues for Qwen3.5; not the same concrete failure." 
+ }, + { + "left": "issue:43531", + "right": "issue:43643", + "accept": false, + "reason": "No meaningful overlap: sliding_window behavior in Qwen3-MoE versus AutoConfig/trust_remote_code missing fields." + }, + { + "left": "issue:43577", + "right": "issue:43582", + "accept": false, + "reason": "Completely different areas: Blip2 dtype loading versus Apple Silicon caching_allocator_warmup TypeError." + }, + { + "left": "issue:43644", + "right": "issue:43749", + "accept": false, + "reason": "Different regressions: non-persistent buffers being filled with junk versus FSDP CPU RAM efficient loading." + }, + { + "left": "issue:43825", + "right": "issue:43994", + "accept": false, + "reason": "Different problems: an incorrect pipeline error message versus nonsensical SigLIP2 model/pipeline outputs." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 42, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:30064", + "issue:36331", + "issue:41720", + "issue:42175", + "issue:43650", + "issue:43720", + "issue:43782", + "issue:43819", + "issue:43825", + "issue:43827", + "issue:43906", + "issue:44361", + "issue:44610", + "issue:44805", + "issue:44936", + "issue:44991", + "issue:45081", + "issue:45325" + ], + "soft_pairs": [ + "issue:43819|issue:44361", + "issue:44805|issue:44936", + "issue:43650|issue:43906", + "issue:41720|issue:44991", + "issue:43720|issue:44610", + "issue:42175|issue:45081", + "issue:43825|issue:43827", + "issue:30064|issue:36331", + "issue:43782|issue:45325" + ], + "prepared_review_unit_hash": "e5347b2823a40db6917e61e61b1cc3f265b6897249e52b89c6ee9e1baaf18e84", + 
"component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 9, + "serialized_chars": 13209, + "estimated_input_tokens": 3303, + "estimated_eval_tokens": 6862 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is highly heterogeneous: the paired items are mostly unrelated bugs in different subsystems/models. I rejected all soft edges; the only loosely related pair is the v5 pipeline-removal docs/runtime-message duo, but they still look like separate fixes rather than the same underlying issue.", + "confidence": 0.93, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43819", + "right": "issue:44361", + "accept": false, + "reason": "Different subsystems and failure modes: DAC latent/STE mismatch vs MLukeTokenizer task AttributeError." + }, + { + "left": "issue:44805", + "right": "issue:44936", + "accept": false, + "reason": "Generic tensor mask shape mismatch is not the same as trainer.evaluate() failing after train(); too different a code path." + }, + { + "left": "issue:43650", + "right": "issue:43906", + "accept": false, + "reason": "No shared bug: one is a vague/placeholder issue, the other is an isolated reproduction of a specific upstream report." + }, + { + "left": "issue:41720", + "right": "issue:44991", + "accept": false, + "reason": "Unrelated problems: Qwen3 auto device mapping CUDA assert vs tokenizer loading for a specific RoBERTa model." 
+ }, + { + "left": "issue:43720", + "right": "issue:44610", + "accept": false, + "reason": "Different concrete failures: BitNet packed-weight loading under accelerate vs OmDet-Turbo processor image-size mismatch." + }, + { + "left": "issue:42175", + "right": "issue:45081", + "accept": false, + "reason": "Packaging/backend install issue is unrelated to a tokenizer regex patch crash during Mistral loading." + }, + { + "left": "issue:43825", + "right": "issue:43827", + "accept": false, + "reason": "Both concern v5 pipeline removals, but one is a runtime error message and the other is stale docs; separate artifacts and fixes." + }, + { + "left": "issue:30064", + "right": "issue:36331", + "accept": false, + "reason": "Image processor segmentation-map handling and CustomTrainer compute_loss kwarg regression are unrelated." + }, + { + "left": "issue:43782", + "right": "issue:45325", + "accept": false, + "reason": "Different Qwen VL models and different bugs: weight_only loading error vs rope-index position scaling issue." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 43, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:34634", + "issue:36010", + "issue:43262", + "issue:43526", + "issue:43582", + "issue:43653", + "issue:43697", + "issue:43749", + "issue:43819", + "issue:44060", + "issue:44291", + "issue:44368", + "issue:44485", + "issue:44509", + "issue:44554", + "issue:44671", + "issue:44857" + ], + "soft_pairs": [ + "issue:44485|issue:44509", + "issue:34634|issue:36010", + "issue:44060|issue:44368", + "issue:43582|issue:44857", + "issue:43749|issue:44291", + "issue:43526|issue:43697", + "issue:43653|issue:44671", + "issue:43526|issue:44554", + "issue:43262|issue:43819" + ], + "prepared_review_unit_hash": "f14d457dda99b4252fe2970b5e19b70d241787841355e6c43bba946d383ee0df", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12719, + "estimated_input_tokens": 3180, + "estimated_eval_tokens": 6616 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is heterogeneous: the issues span unrelated bugs in audio processors, tokenizers, model loading, vision processors, attention behavior, docs, and warnings. 
I do not see a true duplicate set or a meaningful canonical issue to merge around.", + "confidence": 0.97, + "canonical_issue_reason": "No canonical issue: the items are not duplicates and do not share a single underlying bug or change.", + "canonical_pr_reason": null, + "best_issue_reason": "No single issue is a good representative; the cluster should be split because the reports cover distinct subsystems and failure modes.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44485", + "right": "issue:44509", + "accept": false, + "reason": "Different topics: GLM-5 RoPE implementation vs removed docs pipeline tasks. No shared bug or code path." + }, + { + "left": "issue:34634", + "right": "issue:36010", + "accept": false, + "reason": "BarkProcessor voice_preset failure and GenerationMixin ImportError are unrelated errors in different areas." + }, + { + "left": "issue:44060", + "right": "issue:44368", + "accept": false, + "reason": "Both mention tie-weight warnings, but one is a Qwen3-Next incorrect parameter tying bug and the other is a generic LoRA config warning." + }, + { + "left": "issue:43582", + "right": "issue:44857", + "accept": false, + "reason": "Apple Silicon caching allocator TypeError and CUDA AMP image-loss crash are unrelated platform-specific issues." + }, + { + "left": "issue:43749", + "right": "issue:44291", + "accept": false, + "reason": "Both involve model loading, but one is FSDP CPU RAM efficient loading and the other is an init_empty_weights signature mismatch; not the same bug." + }, + { + "left": "issue:43526", + "right": "issue:43697", + "accept": false, + "reason": "BeitImageProcessorFast reduce_labels bug and RTDetrV2 output mismatch are different models and different failure modes." + }, + { + "left": "issue:43653", + "right": "issue:44671", + "accept": false, + "reason": "Tokenizer special-token registration bug and CamemBERT masked-LM prediction bug are not the same underlying issue." 
+ }, + { + "left": "issue:43526", + "right": "issue:44554", + "accept": false, + "reason": "A vision processor label-handling bug is unrelated to an MPS attention correctness issue." + }, + { + "left": "issue:43262", + "right": "issue:43819", + "accept": false, + "reason": "Audio apply_chat_template sampling-rate default mismatch and DAC.from_latents forward mismatch are distinct audio bugs with different code paths." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 44, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:34634", + "issue:36331", + "issue:43257", + "issue:43329", + "issue:43452", + "issue:43454", + "issue:43577", + "issue:43638", + "issue:43653", + "issue:43746", + "issue:43854", + "issue:44589", + "issue:44811", + "issue:44912", + "issue:44936", + "issue:44977", + "issue:44991", + "issue:45042" + ], + "soft_pairs": [ + "issue:43452|issue:44991", + "issue:43638|issue:44936", + "issue:43454|issue:43577", + "issue:43746|issue:44811", + "issue:34634|issue:36331", + "issue:43653|issue:44912", + "issue:44589|issue:45042", + "issue:43854|issue:44977", + "issue:43257|issue:43329" + ], + "prepared_review_unit_hash": "0c59736d18e62e2a9ea4654b67e7c4096f8d2eb3393b815c01864445215deb4f", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 9, + "serialized_chars": 13271, + "estimated_input_tokens": 3318, + "estimated_eval_tokens": 6892 + }, + "overflow_policy": 
"truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "No soft pair looks like the same underlying bug/change; the issues span unrelated models, trainer APIs, tokenizers, quantization, and processor logic. I would not merge any of the suggested pairs.", + "confidence": 0.93, + "canonical_issue_reason": "No clear canonical duplicate exists in this set because the items are unrelated issues rather than one bug reported multiple times. If forced to pick a representative report, issue 44977 is the clearest standalone bug report.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 44977 is the most self-contained and concrete (specific model, specific failure mode, clear symptom), so it is the best representative issue if one must be chosen.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43452", + "right": "issue:44991", + "accept": false, + "reason": "Both are loading/tokenizer problems, but they describe different failure modes: gguf_file handling for AutoTokenizer/AutoModel versus a transformers 5.0 tokenizer regression for a specific model." + }, + { + "left": "issue:43638", + "right": "issue:44936", + "accept": false, + "reason": "Unrelated bugs: one is a Zero3/non-pretrained Bert IndexError during training, the other is evaluate() failing after train()." + }, + { + "left": "issue:43454", + "right": "issue:43577", + "accept": false, + "reason": "Different models and different root causes: lm_head weight tying in AyaVision versus dtype propagation for Blip2/Qformer." + }, + { + "left": "issue:43746", + "right": "issue:44811", + "accept": false, + "reason": "These affect different code paths and symptoms: GraniteSpeech local checkpoint loading with PEFT versus Whisper processor batch_decode skip_special_tokens handling." 
+ }, + { + "left": "issue:34634", + "right": "issue:36331", + "accept": false, + "reason": "No shared underlying bug: BarkProcessor voice_preset is unrelated to a CustomTrainer.compute_loss signature mismatch." + }, + { + "left": "issue:43653", + "right": "issue:44912", + "accept": false, + "reason": "Tokenizer special-token registration causing empty decode is not the same as MXFP4 quantization falling back to bf16." + }, + { + "left": "issue:44589", + "right": "issue:45042", + "accept": false, + "reason": "A float8 storage lookup error is unrelated to PIL image processors unnecessarily requiring torchvision." + }, + { + "left": "issue:43854", + "right": "issue:44977", + "accept": false, + "reason": "Different models and paths: GLM-4.7-Flash unit-test loading versus Qwen3.5 flash-attention generation behavior." + }, + { + "left": "issue:43257", + "right": "issue:43329", + "accept": false, + "reason": "These are distinct: Qwen3 MOE weight conversion under accelerate/deepspeed versus a multimodal token-count helper using undefined video-branch symbols." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 45, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:36010", + "issue:38175", + "issue:39692", + "issue:42175", + "issue:43257", + "issue:43262", + "issue:43531", + "issue:43645", + "issue:43696", + "issue:43749", + "issue:44568", + "issue:44610", + "issue:44756", + "issue:44811", + "issue:44912", + "issue:44938", + "issue:45042", + "issue:45356" + ], + "soft_pairs": [ + "issue:44568|issue:45356", + "issue:36010|issue:43645", + "issue:38175|issue:44610", + "issue:43257|issue:43696", + "issue:39692|issue:44912", + "issue:42175|issue:44938", + "issue:43262|issue:44811", + "issue:43749|issue:44756", + "issue:43531|issue:45042" + ], + "prepared_review_unit_hash": "233917ac4d22f3abd9b467179cfd692ac540269b2f97cfcdaafbb6e2122b8367", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 9, + "serialized_chars": 13138, + "estimated_input_tokens": 3285, + "estimated_eval_tokens": 6826 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These issues are broadly heterogeneous and do not look like duplicate reports of one underlying bug. 
They span unrelated areas such as tokenizers, processors, loading, quantization, environment/backend packaging, and model-specific failures; all soft-edge pairs should be rejected.", + "confidence": 0.97, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": "issue #43749 is the strongest representative only in the sense of triage signal: it has the most discussion and inbound references, and it describes a clear, core loading regression. It is not a duplicate hub for the rest of the cluster.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44568", + "right": "issue:45356", + "accept": false, + "reason": "Both are tokenizer-related, but they describe different regressions: missing BOS/EOS insertion for mDeBERTa vs codec handling and warning behavior for Kimi-K2.5." + }, + { + "left": "issue:36010", + "right": "issue:43645", + "accept": false, + "reason": "Different problems: a specific GenerationMixin import error vs a general Transformers 5.0 notebook custom-model initialization regression." + }, + { + "left": "issue:38175", + "right": "issue:44610", + "accept": false, + "reason": "Different model behaviors and code paths: SigLIP2 zero probabilities vs OmDet-Turbo processor output-size mismatch." + }, + { + "left": "issue:43257", + "right": "issue:43696", + "accept": false, + "reason": "Qwen3 MoE weight conversion under accelerate/deepspeed is unrelated to GPT-oss-20b CUDA OOM on load." + }, + { + "left": "issue:39692", + "right": "issue:44912", + "accept": false, + "reason": "One is a SigLIP2 documentation/example error; the other is an MXFP4 quantization fallback issue for git-oss-20b. Same general theme, but not the same bug." + }, + { + "left": "issue:42175", + "right": "issue:44938", + "accept": false, + "reason": "Backend packaging/install dependency issue vs Python 3.14 loading failure; different failure modes and likely different root causes." 
+ }, + { + "left": "issue:43262", + "right": "issue:44811", + "accept": false, + "reason": "Both involve audio processors, but one is a chat-template sampling-rate default bug and the other is batch_decode ignoring skip_special_tokens." + }, + { + "left": "issue:43749", + "right": "issue:44756", + "accept": false, + "reason": "Both mention memory/loading pain points, but one is broken FSDP CPU RAM efficient loading and the other is Strix Halo mmap-induced OOM; not the same underlying defect." + }, + { + "left": "issue:43531", + "right": "issue:45042", + "accept": false, + "reason": "Qwen3-MoE sliding_window behavior is unrelated to PIL backend processors incorrectly requiring torchvision." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 46, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:36010", + "issue:36331", + "issue:41720", + "issue:43257", + "issue:43329", + "issue:43452", + "issue:43540", + "issue:43550", + "issue:43582", + "issue:43697", + "issue:43756", + "issue:43931", + "issue:44361", + "issue:44442", + "issue:44479", + "issue:44912", + "issue:45357" + ], + "soft_pairs": [ + "issue:43329|issue:44479", + "issue:43257|issue:43756", + "issue:43550|issue:44912", + "issue:36010|issue:36331", + "issue:43697|issue:45357", + "issue:43257|issue:43931", + "issue:41720|issue:43540", + "issue:43452|issue:43582", + "issue:44361|issue:44442" + ], + "prepared_review_unit_hash": "8b6867095de75343e97cf8510b71d7768b6cdef9fd055557837d9afaa3ff63c6", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + 
"estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12656, + "estimated_input_tokens": 3164, + "estimated_eval_tokens": 6584 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "One likely duplicate pair stands out around the Qwen video-input regression; the rest are clearly separate bugs affecting different code paths, models, or failure modes.", + "confidence": 0.74, + "canonical_issue_reason": "Among the accepted duplicate pair, issue 44479 is the better canonical because it cleanly describes the broader v5.3.0 video-input regression across multiple Qwen VL/Omni variants, while 43329 is a narrower root-cause report for the same failure area.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 44479 is the strongest standalone representative: it is explicit about the regression scope, affected model families, and version, making it the most useful anchor for deduplication.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43329", + "right": "issue:44479", + "accept": true, + "reason": "Both describe the same video-input failure path in Qwen multimodal models. 43329 identifies the likely root cause in the video branch, and 44479 reports the broader regression across the same video-capable Qwen variants." + }, + { + "left": "issue:43257", + "right": "issue:43756", + "accept": false, + "reason": "Different bugs: one is Qwen3 MoE weight-conversion/loading with accelerate+deepspeed, the other is a Smollm3 RoPE-layer-count issue." + }, + { + "left": "issue:43550", + "right": "issue:44912", + "accept": false, + "reason": "Different failure modes and subsystems: torch.compile/SDPA on Bamba versus MXFP4 quantization fallback on git-oss-20b." 
+ }, + { + "left": "issue:36010", + "right": "issue:36331", + "accept": false, + "reason": "Unrelated symptoms: an import path break for GenerationMixin versus a Trainer subclass signature mismatch." + }, + { + "left": "issue:43697", + "right": "issue:45357", + "accept": false, + "reason": "One is an inference-output consistency regression in RTDetrV2; the other is an incorrect save_pretrained key regression for Qwen3.5 visual encoder weights." + }, + { + "left": "issue:43257", + "right": "issue:43931", + "accept": false, + "reason": "Both involve Qwen models, but one is a deepspeed/accelerate conversion problem and the other is a model weight-shape mismatch during loading; not the same bug." + }, + { + "left": "issue:41720", + "right": "issue:43540", + "accept": false, + "reason": "Different models and code paths: auto device mapping with cudaErrorAssert on Qwen3 versus video-input validation errors in Qwen3OmniMoe." + }, + { + "left": "issue:43452", + "right": "issue:43582", + "accept": false, + "reason": "Tokenizer/model-loading issues for gguf files are unrelated to an Apple Silicon allocator warmup TypeError." + }, + { + "left": "issue:44361", + "right": "issue:44442", + "accept": false, + "reason": "These are separate tokenizer-loading failures for different tokenizers and different underlying causes." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 47, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:42175", + "issue:42915", + "issue:43329", + "issue:43450", + "issue:43540", + "issue:43577", + "issue:43824", + "issue:43906", + "issue:44062", + "issue:44291", + "issue:44479", + "issue:44488", + "issue:44560", + "issue:44821", + "issue:44991", + "issue:45042", + "issue:45072", + "issue:45381" + ], + "soft_pairs": [ + "issue:44062|issue:44991", + "issue:43329|issue:43450", + "issue:44479|issue:45381", + "issue:42175|issue:43824", + "issue:43540|issue:44560", + "issue:44291|issue:45042", + "issue:43577|issue:45072", + "issue:42175|issue:44488", + "issue:42915|issue:44821", + "issue:43906|issue:45381", + "issue:43540|issue:44479" + ], + "prepared_review_unit_hash": "6dfb1a28c5762b129dd0f2a2b069be44564ece61ec3e7953fc984da01ee1a788", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 11, + "serialized_chars": 13822, + "estimated_input_tokens": 3456, + "estimated_eval_tokens": 7168 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This set is mostly heterogeneous issue reports. 
Only the Qwen video-regression pair looks plausibly duplicate; the other soft pairs are related by broad subsystem words but describe different concrete bugs.", + "confidence": 0.74, + "canonical_issue_reason": "issue:44479 is the strongest canonical issue because it describes the broadest concrete regression surface (multiple Qwen VL variants on 5.3.0) and likely subsumes the narrower qwen2.5-vl video-position-id report.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:44479 is the best global issue candidate for consolidation: it is the broadest, most central report among the only plausible duplicate pocket and is the most likely umbrella tracker.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44062", + "right": "issue:44991", + "accept": false, + "reason": "Different failure modes: AddedToken/special-kwarg constructor error vs tokenizer loading failure for a specific model. Same tokenizer area, but not the same underlying bug." + }, + { + "left": "issue:43329", + "right": "issue:43450", + "accept": false, + "reason": "Both involve video processors, but one is an undefined-name bug in multimodal token counting and the other is an incorrect batched output shape. Distinct concrete problems." + }, + { + "left": "issue:44479", + "right": "issue:45381", + "accept": true, + "reason": "Both report the same Qwen video-input regression on transformers 5.3.0. The narrower 'vision_position_ids seems to be wrong' symptom plausibly describes the same root cause as the broader video regression report." + }, + { + "left": "issue:42175", + "right": "issue:43824", + "accept": false, + "reason": "Unrelated issues: one is about an optional backend missing from a pip extra, the other is an ImportError for a specific model class. No shared bug." + }, + { + "left": "issue:43540", + "right": "issue:44560", + "accept": false, + "reason": "Different models and exceptions: Qwen3OmniMoe video ValueError vs Qwen3-vl-embedding StopIteration. 
Too different to be the same bug." + }, + { + "left": "issue:44291", + "right": "issue:45042", + "accept": false, + "reason": "One is an init_empty_weights loading argument incompatibility; the other is an image processor backend dependency issue. Different code paths and fixes." + }, + { + "left": "issue:43577", + "right": "issue:45072", + "accept": false, + "reason": "Both mention dtype, but one is a Blip2 loading-state problem and the other is a CI inference mismatch for different model classes. Not the same concrete defect." + }, + { + "left": "issue:42175", + "right": "issue:44488", + "accept": false, + "reason": "Backend packaging/install issue vs a model load failure for cjvt/sleng-bert. No evidence of a shared root cause." + }, + { + "left": "issue:42915", + "right": "issue:44821", + "accept": false, + "reason": "Qwen3Moe FP8 config failure and AutoImageProcessor URL loading are unrelated bugs in different parts of the stack." + }, + { + "left": "issue:43906", + "right": "issue:45381", + "accept": false, + "reason": "An isolated reproduction of another external issue is not the same as the Qwen video-position-id regression. Different subject and likely different fix." + }, + { + "left": "issue:43540", + "right": "issue:44479", + "accept": false, + "reason": "Both are video-related Qwen issues, but they target different models and error types. They may share a subsystem, but not enough to treat as the same underlying bug." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 48, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:33357", + "issue:34567", + "issue:36010", + "issue:38175", + "issue:42175", + "issue:42915", + "issue:43582", + "issue:43720", + "issue:43742", + "issue:43756", + "issue:43782", + "issue:44484", + "issue:44704", + "issue:44898", + "issue:45137", + "issue:45325", + "issue:45405" + ], + "soft_pairs": [ + "issue:42915|issue:43742", + "issue:33357|issue:34567", + "issue:43720|issue:44898", + "issue:45137|issue:45405", + "issue:36010|issue:42175", + "issue:43756|issue:45325", + "issue:44484|issue:44704", + "issue:43582|issue:43782", + "issue:38175|issue:44898" + ], + "prepared_review_unit_hash": "e948a0ab90cde8383d9b3d0bb714a5b5b535b20370fbc74b3eaa97eb9be15574", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12506, + "estimated_input_tokens": 3127, + "estimated_eval_tokens": 6510 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "No duplicate-worthy cluster here: the soft-similarity pairs are only superficially related (same broad model/loading/vision language) but describe different models, code paths, and failure modes.", + "confidence": 0.97, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, 
+ "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:42915", + "right": "issue:43742", + "accept": false, + "reason": "Both are loading-related, but one is a Qwen3Moe FineGrainedFP8Config failure and the other is a MobileLLM key error; different models and underlying bugs." + }, + { + "left": "issue:33357", + "right": "issue:34567", + "accept": false, + "reason": "MacOS bus error on CLIP model loading is unrelated to TrainerState num_input_tokens_seen not updating." + }, + { + "left": "issue:43720", + "right": "issue:44898", + "accept": false, + "reason": "BitNet packed-weight/accelerate loading failure and Perceiver interpolation failure affect different code paths and models." + }, + { + "left": "issue:45137", + "right": "issue:45405", + "accept": false, + "reason": "DeepSpeed ZeRO3 deque IndexError is unrelated to a PEFT version-bump packaging issue." + }, + { + "left": "issue:36010", + "right": "issue:42175", + "accept": false, + "reason": "GenerationMixin import failure and missing TensorFlow in the torch extra are different packaging/import problems." + }, + { + "left": "issue:43756", + "right": "issue:45325", + "accept": false, + "reason": "Both mention RoPE, but Smollm3 layer-selection behavior and Qwen2.5-VL temporal position scaling are distinct bugs." + }, + { + "left": "issue:44484", + "right": "issue:44704", + "accept": false, + "reason": "save_pretrained shard-size defaults and AutoProcessor kwarg forwarding are separate API issues." + }, + { + "left": "issue:43582", + "right": "issue:43782", + "accept": false, + "reason": "Apple Silicon caching_allocator_warmup TypeError is unrelated to Qwen3VL weight_only loading errors." + }, + { + "left": "issue:38175", + "right": "issue:44898", + "accept": false, + "reason": "SigLIP2 zero-probability output and Perceiver positional interpolation failure are different model-specific issues." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 49, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:28282", + "issue:29127", + "issue:34567", + "issue:34689", + "issue:36331", + "issue:43452", + "issue:43643", + "issue:43646", + "issue:43653", + "issue:43742", + "issue:43749", + "issue:44062", + "issue:44291", + "issue:44361", + "issue:44488", + "issue:44821", + "issue:45081", + "issue:45375" + ], + "soft_pairs": [ + "issue:43643|issue:45375", + "issue:44062|issue:45081", + "issue:43646|issue:43749", + "issue:43742|issue:44291", + "issue:28282|issue:34689", + "issue:43452|issue:44488", + "issue:44291|issue:44821", + "issue:43653|issue:44361", + "issue:28282|issue:29127", + "issue:34567|issue:36331" + ], + "prepared_review_unit_hash": "3a2754d562aac05c80a6d7e9a5c0b8cb1be567f6b12fe3ef4dce53f1861e7217", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13564, + "estimated_input_tokens": 3391, + "estimated_eval_tokens": 7038 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly a set of unrelated Transformers bug reports spanning tokenizer loading, config parsing, trainer API changes, and model-specific failures. 
The soft-similarity pairs share only surface-level patterns (loading errors, missing fields, TypeErrors) but not the same underlying defect.", + "confidence": 0.94, + "canonical_issue_reason": "No single issue cleanly represents the whole set; the reports cover distinct code paths and regressions. If forced to pick the broadest, issue 43646 is the most general, but it is not a duplicate of the others.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 43646 is the broadest user-facing regression report in the set, but the cluster is too heterogeneous for a strong canonical issue choice.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43643", + "right": "issue:45375", + "accept": false, + "reason": "Both mention missing fields in config objects, but they affect different configs and different mechanisms (`trust_remote_code` vs `@strict` dropping a field). Not the same bug." + }, + { + "left": "issue:44062", + "right": "issue:45081", + "accept": false, + "reason": "Both are tokenizer-related TypeErrors, but one is about `AddedToken(special=...)` and the other is a Mistral regex patch accessing `backend_tokenizer`. Different failure sites and causes." + }, + { + "left": "issue:43646", + "right": "issue:43749", + "accept": false, + "reason": "Different regressions: custom model initialization breakage versus FSDP CPU RAM efficient loading. Same broad release scope, not the same underlying code path." + }, + { + "left": "issue:43742", + "right": "issue:44291", + "accept": false, + "reason": "Both involve loading/model init issues, but one is a key error for MobileLLM and the other is an `init_empty_weights`/`_is_hf_initialized` argument mismatch. Not duplicates." + }, + { + "left": "issue:28282", + "right": "issue:34689", + "accept": false, + "reason": "Both are model-loading related, but one is missing PyTorch dependency detection and the other is a Llama 3.2 vision loading regression. Different environments and root causes." 
+ }, + { + "left": "issue:43452", + "right": "issue:44488", + "accept": false, + "reason": "Both mention tokenizer/model loading, but one is about `gguf_file` handling and the other is a specific tokenizer load failure for `cjvt/sleng-bert`. Not the same defect." + }, + { + "left": "issue:44291", + "right": "issue:44821", + "accept": false, + "reason": "`init_empty_weights` initialization error is unrelated to inability to load `AutoImageProcessor` from a URL; different API surfaces and code paths." + }, + { + "left": "issue:43653", + "right": "issue:44361", + "accept": false, + "reason": "Both are tokenizer-specific bugs, but one is about special-token registration for BigBirdTokenizer and the other is an AttributeError in MLukeTokenizer tasks. Different tokenizers and behaviors." + }, + { + "left": "issue:28282", + "right": "issue:29127", + "accept": false, + "reason": "PyTorch missing-library ImportError and LayoutLMv3 error messaging for insufficient box info are unrelated issues; only the generic error-reporting theme overlaps." + }, + { + "left": "issue:34567", + "right": "issue:36331", + "accept": false, + "reason": "TrainerState token counter not updating and `compute_loss()` signature mismatch are distinct trainer API issues with different symptoms and fixes." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 50, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:43531", + "issue:43643", + "issue:43653", + "issue:43688", + "issue:43742", + "issue:43749", + "issue:43927", + "issue:44514", + "issue:44561", + "issue:44589", + "issue:44792", + "issue:44821", + "issue:44964", + "issue:45042", + "issue:45081", + "issue:45290", + "issue:45381" + ], + "soft_pairs": [ + "issue:43688|issue:43749", + "issue:44792|issue:44964", + "issue:43643|issue:44561", + "issue:44514|issue:45290", + "issue:43531|issue:44821", + "issue:43742|issue:44589", + "issue:43653|issue:43927", + "issue:44514|issue:45381", + "issue:45042|issue:45081" + ], + "prepared_review_unit_hash": "6af5275d7d69869aad1dbaa8029f0e4e388ed569307fcf3fdb9339e97cbac0b4", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12547, + "estimated_input_tokens": 3137, + "estimated_eval_tokens": 6530 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This is a heterogeneous cluster of unrelated bug reports across different models, tokenizers, processors, and loading paths. 
None of the soft pairs look like the same underlying issue, so I would not merge any of them.", + "confidence": 0.95, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": "No single issue is a strong canonical representative: the reports span distinct subsystems and failure modes (loading, generation, chat templating, tokenizer/config handling, multimodal processing).", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43688", + "right": "issue:43749", + "accept": false, + "reason": "Different problems: auxiliary-loss normalization in MoE models vs broken FSDP CPU RAM-efficient loading. Same broad training area, but not the same bug." + }, + { + "left": "issue:44792", + "right": "issue:44964", + "accept": false, + "reason": "Both touch multimodal image models, but one is a failing janus image-generation test and the other is a Phi-4 multimodal load failure. Different code paths and symptoms." + }, + { + "left": "issue:43643", + "right": "issue:44561", + "accept": false, + "reason": "Both involve trust_remote_code, but one is missing fields from AutoConfig.from_pretrained and the other is a v5.0 break caused by removing is_torch_fx_available. Not the same defect." + }, + { + "left": "issue:44514", + "right": "issue:45290", + "accept": false, + "reason": "Both mention apply_chat_template, but one crashes on batched padded input and the other on assistant tool-call messages with empty content. Separate edge cases." + }, + { + "left": "issue:43531", + "right": "issue:44821", + "accept": false, + "reason": "Qwen3-MoE sliding_window behavior vs loading AutoImageProcessor from URL are unrelated." + }, + { + "left": "issue:43742", + "right": "issue:44589", + "accept": false, + "reason": "KeyError while loading MobileLLM-125M vs Float8 storage deserialization failure are unrelated loading/runtime bugs." 
+ }, + { + "left": "issue:43653", + "right": "issue:43927", + "accept": false, + "reason": "Tokenizer special-token registration causing empty decode vs DiaConfig losing custom token IDs after save/load. Both involve token metadata, but different components and failures." + }, + { + "left": "issue:44514", + "right": "issue:45381", + "accept": false, + "reason": "apply_chat_template padding crash vs Qwen2.5-VL video vision_position_ids bug are different multimodal issues." + }, + { + "left": "issue:45042", + "right": "issue:45081", + "accept": false, + "reason": "PIL backend incorrectly requiring torchvision vs Mistral regex patch AttributeError are unrelated." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 51, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:41762", + "issue:43295", + "issue:43452", + "issue:43504", + "issue:43550", + "issue:43577", + "issue:43606", + "issue:43653", + "issue:43688", + "issue:43697", + "issue:43720", + "issue:44568", + "issue:44843", + "issue:44898", + "issue:44936", + "issue:45216", + "issue:45356", + "issue:45440" + ], + "soft_pairs": [ + "issue:43452|issue:44843", + "issue:43504|issue:43720", + "issue:43697|issue:45216", + "issue:43452|issue:43577", + "issue:43688|issue:45440", + "issue:43504|issue:43550", + "issue:41762|issue:44936", + "issue:43295|issue:45356", + "issue:43606|issue:44898", + "issue:43653|issue:44568" + ], + "prepared_review_unit_hash": "32bedb33b7a8bda9804219d939c2bdf602f86bc80dbbc695b36bafd188a4e04d", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, 
+ "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13782, + "estimated_input_tokens": 3446, + "estimated_eval_tokens": 7148 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is heterogeneous: the items span unrelated tokenizer, model-loading, training, and vision-model regressions. None of the soft pairs look like the same underlying bug, so there is no true duplicate group here.", + "confidence": 0.96, + "canonical_issue_reason": "issue:44843 is the best representative anchor because it is a concrete, reproducible tokenizer-loading regression with a clear failure mode and user impact. That said, it is not a duplicate of the others; the cluster is too diverse for a single canonical bug.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:44843 is the most actionable and broadly representative of the cluster's loose loading/tokenizer theme, but only as a representative issue\u2014not as a duplicate target.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43452", + "right": "issue:44843", + "accept": false, + "reason": "Both involve from_pretrained/tokenizer loading, but the concrete bugs differ: gguf_file breaks tokenizer/model loading, while 44843 is an offline failure in _patch_mistral_regex/model_info. Different code paths and symptoms." + }, + { + "left": "issue:43504", + "right": "issue:43720", + "accept": false, + "reason": "Different models and failure modes: Beit legacy-field loading vs BitNet packed-weight unpacking during accelerate loading. Same broad loading theme, not the same bug." 
+ }, + { + "left": "issue:43697", + "right": "issue:45216", + "accept": false, + "reason": "RTDetrV2 output drift in v5 and Qwen3.5 save_pretrained corruption are unrelated regressions affecting different subsystems." + }, + { + "left": "issue:43452", + "right": "issue:43577", + "accept": false, + "reason": "gguf_file breaks AutoTokenizer/AutoModel loading, while BLIP2 has an incorrect dtype propagation issue. Not the same underlying defect." + }, + { + "left": "issue:43688", + "right": "issue:45440", + "accept": false, + "reason": "Both are MoE-related, but one is about auxiliary loss normalization and the other about DeepSeekV3 implementation divergence. Different bug classes." + }, + { + "left": "issue:43504", + "right": "issue:43550", + "accept": false, + "reason": "Beit legacy-field loading and Bamba torch.compile/SDPA failure are unrelated; same broad ML area only." + }, + { + "left": "issue:41762", + "right": "issue:44936", + "accept": false, + "reason": "Gemma3 ZeRO-3 loading IndexError and trainer.evaluate() after trainer.train() are different lifecycle issues with different failure contexts." + }, + { + "left": "issue:43295", + "right": "issue:45356", + "accept": false, + "reason": "Both are tokenizer-adjacent regressions, but one is about processor.tokenizer/image passing and the other is Kimi codec handling plus a misleading warning. Not the same bug." + }, + { + "left": "issue:43606", + "right": "issue:44898", + "accept": false, + "reason": "CPU offload device mismatch in bark-small and Perceiver interpolate_pos_encoding failure are different model-specific bugs." + }, + { + "left": "issue:43653", + "right": "issue:44568", + "accept": false, + "reason": "Both concern special tokens, but BigBird's mask token registration and mDeBERTa's add_special_tokens BOS/EOS omission are distinct tokenizer defects." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 52, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:41720", + "issue:43504", + "issue:43525", + "issue:43606", + "issue:43643", + "issue:43720", + "issue:43906", + "issue:43927", + "issue:44464", + "issue:44466", + "issue:44610", + "issue:44625", + "issue:44743", + "issue:44843", + "issue:44877", + "issue:44991", + "issue:45005" + ], + "soft_pairs": [ + "issue:43504|issue:44610", + "issue:44625|issue:44877", + "issue:41720|issue:43906", + "issue:44464|issue:44743", + "issue:44843|issue:44991", + "issue:43720|issue:43927", + "issue:43606|issue:43927", + "issue:43525|issue:43643", + "issue:44466|issue:45005" + ], + "prepared_review_unit_hash": "0d6e6b78619f6043e7cab6921b271f353ae2882c09aafe67923ac9ba0aea9c3d", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12691, + "estimated_input_tokens": 3173, + "estimated_eval_tokens": 6602 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "Mostly unrelated bug reports with one likely duplicate pair around missing config fields during AutoConfig loading. 
No PRs present.", + "confidence": 0.67, + "canonical_issue_reason": "43643 is the broader, more general report about `trust_remote_code=True` dropping fields from `AutoConfig` results; 43525 looks like a concrete symptom of that same missing-field problem (`pad_token_id`).", + "canonical_pr_reason": null, + "best_issue_reason": "43643 is the best single issue to keep because it is open, generalizes the narrower Llama4 symptom, and points at the underlying config-loading defect.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43504", + "right": "issue:44610", + "accept": false, + "reason": "Different bugs: legacy-field/preset loading for BEiT segmentation vs processor/model input-size mismatch for OmDet-Turbo." + }, + { + "left": "issue:44625", + "right": "issue:44877", + "accept": false, + "reason": "Both involve config handling, but one is `num_labels` propagation and the other is strict config blocking `granite_speech`; not the same failure path." + }, + { + "left": "issue:41720", + "right": "issue:43906", + "accept": false, + "reason": "The second is only an isolated reproduction of another issue, and there is no evidence it matches the Qwen3 auto-device-mapping CUDA assert." + }, + { + "left": "issue:44464", + "right": "issue:44743", + "accept": false, + "reason": "Both concern generation/state behavior, but one is chunked generation with compiled forward and the other is recurrent state reset with cache; too different to merge safely." + }, + { + "left": "issue:44843", + "right": "issue:44991", + "accept": false, + "reason": "Tokenizer-loading/offline regression vs a specific tokenizer failure on EMBEDDIA/est-roberta; same area, but not clearly the same concrete bug." + }, + { + "left": "issue:43720", + "right": "issue:43927", + "accept": false, + "reason": "Packed-weight loading during accelerate vs config token IDs lost on save/load; unrelated code paths and symptoms." 
+ }, + { + "left": "issue:43606", + "right": "issue:43927", + "accept": false, + "reason": "CPU-offload device mismatch is unrelated to DiaConfig losing custom token IDs and causing generation errors." + }, + { + "left": "issue:43525", + "right": "issue:43643", + "accept": true, + "reason": "Very likely the same underlying missing-field/config-loading bug: 43525's missing `pad_token_id` is a concrete example of fields being dropped from the returned config object." + }, + { + "left": "issue:44466", + "right": "issue:45005", + "accept": false, + "reason": "Both mention tied weights in v5, but one is a device-dependent `lm_head.weight` serialization inconsistency and the other is a broader translation-model tied-weights report; not clearly the same fix." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 53, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:24643", + "issue:29127", + "issue:38175", + "issue:40444", + "issue:43122", + "issue:43295", + "issue:43577", + "issue:43697", + "issue:43720", + "issue:43819", + "issue:43906", + "issue:44361", + "issue:44479", + "issue:44811", + "issue:44936", + "issue:44977", + "issue:45356" + ], + "soft_pairs": [ + "issue:43819|issue:44811", + "issue:40444|issue:44811", + "issue:43577|issue:43697", + "issue:43720|issue:44361", + "issue:24643|issue:29127", + "issue:44977|issue:45356", + "issue:38175|issue:43295", + "issue:43122|issue:43906", + "issue:44479|issue:44936" + ], + "prepared_review_unit_hash": "4636e2045900cfbf553d378922e1d4edf813d8acd2c0ccb74a09c2d4556ccd4b", + "component_budget": { + "node_count": 349, + "item_count": 349, + 
"soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12675, + "estimated_input_tokens": 3169, + "estimated_eval_tokens": 6594 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster appears to be a false-positive grouping of unrelated issue reports across different models, tokenizers, training, and inference paths. None of the soft pairs look like the same underlying bug or change.", + "confidence": 0.97, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43819", + "right": "issue:44811", + "accept": false, + "reason": "Different features and code paths: DAC latent decoding vs Whisper batch_decode token skipping." + }, + { + "left": "issue:40444", + "right": "issue:44811", + "accept": false, + "reason": "Qwen2.5-VL multi-image IterableDataset failure is unrelated to Whisper decoding behavior." + }, + { + "left": "issue:43577", + "right": "issue:43697", + "accept": false, + "reason": "BLIP2 dtype loading bug and RTDetrV2 output regression are separate model-specific issues." + }, + { + "left": "issue:43720", + "right": "issue:44361", + "accept": false, + "reason": "BitNet packed-weight loading and MLukeTokenizer task AttributeError are unrelated subsystems." + }, + { + "left": "issue:24643", + "right": "issue:29127", + "accept": false, + "reason": "DeepSpeed weight shape error and LayoutLMv3 box-info messaging are different problems." + }, + { + "left": "issue:44977", + "right": "issue:45356", + "accept": false, + "reason": "Qwen3.5 flash-attention generation regression is unrelated to Kimi-K2.5 tokenizer codec handling." 
+ }, + { + "left": "issue:38175", + "right": "issue:43295", + "accept": false, + "reason": "SigLIP2 zero-probability inference issue and processor.tokenizer regression are distinct failures." + }, + { + "left": "issue:43122", + "right": "issue:43906", + "accept": false, + "reason": "Both mention tokenization, but one is a version-difference report and the other is only a reproduction of another issue; no clear same bug linkage." + }, + { + "left": "issue:44479", + "right": "issue:44936", + "accept": false, + "reason": "Video-input regression for Qwen models and trainer.evaluate() failing after train() are unrelated." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 54, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:28282", + "issue:30064", + "issue:32090", + "issue:33357", + "issue:33666", + "issue:35141", + "issue:43329", + "issue:43452", + "issue:43504", + "issue:43531", + "issue:43577", + "issue:43606", + "issue:43742", + "issue:44451", + "issue:44464", + "issue:44554", + "issue:44977", + "issue:45325" + ], + "soft_pairs": [ + "issue:30064|issue:33666", + "issue:28282|issue:33357", + "issue:28282|issue:32090", + "issue:28282|issue:35141", + "issue:43504|issue:43606", + "issue:43577|issue:44554", + "issue:44464|issue:44977", + "issue:43531|issue:43742", + "issue:43329|issue:45325", + "issue:43452|issue:44451" + ], + "prepared_review_unit_hash": "942ffa8f50bb85852b4c60d2939f81efdb3c1facac95028d16c352ce7efdf1c1", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + 
"estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13545, + "estimated_input_tokens": 3387, + "estimated_eval_tokens": 7030 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "All proposed soft pairs look like false positives. The items span unrelated bugs across loading, training, multimodal processing, generation, and device/runtime issues, so this cluster should not be collapsed into a single duplicate set.", + "confidence": 0.98, + "canonical_issue_reason": "No canonical issue: the reports describe distinct failures in different subsystems and code paths.", + "canonical_pr_reason": null, + "best_issue_reason": "No global best issue: none of the issues is a safe anchor for the rest, since the pairwise similarities are only superficial.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:30064", + "right": "issue:33666", + "accept": false, + "reason": "Different features and failure modes: segmentation-map processing vs multi-GPU Qwen2-VL training." + }, + { + "left": "issue:28282", + "right": "issue:33357", + "accept": false, + "reason": "Both are runtime failures, but one is a missing PyTorch import and the other is a MacOS bus error with CLIP." + }, + { + "left": "issue:28282", + "right": "issue:32090", + "accept": false, + "reason": "Unrelated bugs: missing backend dependency vs NoneType broadcast error in Trainer." + }, + { + "left": "issue:28282", + "right": "issue:35141", + "accept": false, + "reason": "Different code paths: dependency import failure vs embedding resize/post_init reinitialization." + }, + { + "left": "issue:43504", + "right": "issue:43606", + "accept": false, + "reason": "Different model-loading problems: legacy preset field vs CPU offload device mismatch." 
+ }, + { + "left": "issue:43577", + "right": "issue:44554", + "accept": false, + "reason": "Distinct correctness issues: BLIP2 dtype propagation vs MPS attention shape/value-head mismatch." + }, + { + "left": "issue:44464", + "right": "issue:44977", + "accept": false, + "reason": "Both affect generation, but the concrete bugs and model stacks differ too much to be the same change." + }, + { + "left": "issue:43531", + "right": "issue:43742", + "accept": false, + "reason": "Different failures: Qwen3-MoE sliding_window behavior vs a key error loading MobileLLM-125M." + }, + { + "left": "issue:43329", + "right": "issue:45325", + "accept": false, + "reason": "Both involve VLM internals, but one is an undefined-variable video branch bug and the other is a rope-index scaling issue." + }, + { + "left": "issue:43452", + "right": "issue:44451", + "accept": false, + "reason": "Both are model-loading failures, but the specific breakage differs: gguf_file support vs loading ScandiBERT." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 55, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:41720", + "issue:43012", + "issue:43334", + "issue:43504", + "issue:43540", + "issue:43572", + "issue:43577", + "issue:43653", + "issue:43716", + "issue:43927", + "issue:44361", + "issue:44464", + "issue:44479", + "issue:44625", + "issue:44849", + "issue:45200", + "issue:45229", + "issue:45357" + ], + "soft_pairs": [ + "issue:43504|issue:43653", + "issue:44849|issue:45357", + "issue:43927|issue:44361", + "issue:45200|issue:45229", + "issue:43012|issue:43577", + "issue:44479|issue:44625", + 
"issue:43334|issue:43572", + "issue:43540|issue:43716", + "issue:41720|issue:44464" + ], + "prepared_review_unit_hash": "5b27c35bc57696ea1f590b6d420ed1609aefbfebefec902fe98469f7434daf32", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 9, + "serialized_chars": 13161, + "estimated_input_tokens": 3291, + "estimated_eval_tokens": 6838 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The cluster is heterogeneous: the paired items share only broad keywords like dtype, token IDs, or model family names, but they describe different bugs, models, and code paths. No soft edge looks like a true duplicate or mergeable PR pair.", + "confidence": 0.97, + "canonical_issue_reason": "No single issue is a safe canonical because the items span unrelated regressions across different models and subsystems.", + "canonical_pr_reason": "No pull requests are present in this cluster.", + "best_issue_reason": "No global best issue stands out; any choice would be arbitrary because the cluster is not a duplicate set.", + "best_pr_reason": "No pull requests are present in this cluster.", + "soft_edge_verdicts": [ + { + "left": "issue:43504", + "right": "issue:43653", + "accept": false, + "reason": "Both are load/token-related, but one is a legacy-field preset loading bug in BEiT and the other is a BigBirdTokenizer special-token registration issue; different components and failure modes." + }, + { + "left": "issue:44849", + "right": "issue:45357", + "accept": false, + "reason": "Both mention Qwen3.5, but one is about output_hidden_states behavior and the other is a save_pretrained regression with visual encoder keys; separate code paths." 
+ }, + { + "left": "issue:43927", + "right": "issue:44361", + "accept": false, + "reason": "DiaConfig losing custom token IDs on save/load is unrelated to MLukeTokenizer raising AttributeError on tasks; no shared underlying bug is evident." + }, + { + "left": "issue:45200", + "right": "issue:45229", + "accept": false, + "reason": "Gemma 4 text-only token-type defaults and Gemma 4 multi-GPU CUDA OOM are different problems; one is input/config handling, the other is memory capacity." + }, + { + "left": "issue:43012", + "right": "issue:43577", + "accept": false, + "reason": "Both concern dtype/precision, but one is a PyTorch warning during bfloat16 compilation and the other is BLIP2 dtype fields staying float32 after loading; not the same bug." + }, + { + "left": "issue:44479", + "right": "issue:44625", + "accept": false, + "reason": "Qwen video-input regression and Qwen3.5 num_labels propagation are unrelated; one affects multimodal preprocessing, the other config propagation for labels." + }, + { + "left": "issue:43334", + "right": "issue:43572", + "accept": false, + "reason": "Both involve missing token IDs in configs, but they affect different model families and different fields (pad_token_id vs pad_token_idx); not the same underlying issue." + }, + { + "left": "issue:43540", + "right": "issue:43716", + "accept": false, + "reason": "A video-input ValueError in Qwen3OmniMoe and a dtype mismatch between image preprocessor and model in Mistral-3 are distinct multimodal bugs." + }, + { + "left": "issue:41720", + "right": "issue:44464", + "accept": false, + "reason": "Qwen3 auto device mapping causing cudaErrorAssert and chunked generation inconsistency with compiled forward are different execution-path failures with no clear shared root cause." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 56, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:32090", + "issue:33666", + "issue:34567", + "issue:43262", + "issue:43278", + "issue:43493", + "issue:43525", + "issue:43572", + "issue:43688", + "issue:43742", + "issue:43756", + "issue:43825", + "issue:43873", + "issue:44488", + "issue:44821", + "issue:44843", + "issue:44913", + "issue:45084" + ], + "soft_pairs": [ + "issue:43742|issue:44821", + "issue:43525|issue:43572", + "issue:32090|issue:33666", + "issue:32090|issue:34567", + "issue:43756|issue:44913", + "issue:43278|issue:43688", + "issue:43493|issue:43825", + "issue:43262|issue:45084", + "issue:44488|issue:44843", + "issue:43688|issue:43873" + ], + "prepared_review_unit_hash": "c9208e9795bd9e69e3958613039bacb951ca76d39c8f704a4ae6680469659b97", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13314, + "estimated_input_tokens": 3329, + "estimated_eval_tokens": 6914 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The cluster is heterogeneous: none of the soft pairs look like the same underlying bug or change, so I would not merge any of them as duplicates.", + "confidence": 0.94, + "canonical_issue_reason": "No single canonical issue stands out because the items span 
unrelated trainer, model config, tokenizer, processor, and pipeline problems.", + "canonical_pr_reason": null, + "best_issue_reason": "If one issue must anchor the set, #43873 is the most active/open and has the most inbound references, but it is not a true duplicate hub for the others.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43742", + "right": "issue:44821", + "accept": false, + "reason": "Different loading failures: one is a key error for facebook/MobileLLM-125M, the other is an AutoImageProcessor URL-loading problem." + }, + { + "left": "issue:43525", + "right": "issue:43572", + "accept": false, + "reason": "Both involve missing pad-token-related config fields, but they affect different model configs and different attributes, so they are not the same concrete bug." + }, + { + "left": "issue:32090", + "right": "issue:33666", + "accept": false, + "reason": "One is a Trainer GPU broadcast TypeError; the other is a Qwen2-VL multi-GPU training request. Same broad area, not the same failure." + }, + { + "left": "issue:32090", + "right": "issue:34567", + "accept": false, + "reason": "Trainer broadcast NoneType error and TrainerState token-count update bug are distinct trainer issues with different code paths." + }, + { + "left": "issue:43756", + "right": "issue:44913", + "accept": false, + "reason": "Both concern rotary settings, but one is about Smollm3 dropping RoPE layers while the other is a GPTNeoX config reload regression." + }, + { + "left": "issue:43278", + "right": "issue:43688", + "accept": false, + "reason": "Embedding dtype drift on evaluate is unrelated to auxiliary-loss normalization in OLMoE/GPT Oss." + }, + { + "left": "issue:43493", + "right": "issue:43825", + "accept": false, + "reason": "SigLIP2 implementation mismatch and pipeline translation-task messaging are unrelated issues." 
+ }, + { + "left": "issue:43262", + "right": "issue:45084", + "accept": false, + "reason": "Audio processor chat-template sampling-rate default and template-compilation TypeError are different code paths and symptoms." + }, + { + "left": "issue:44488", + "right": "issue:44843", + "accept": false, + "reason": "Model loading failure for cjvt/sleng-bert and offline-mode mistral regex patch bug are separate loader problems." + }, + { + "left": "issue:43688", + "right": "issue:43873", + "accept": false, + "reason": "Auxiliary-loss normalization and quantization/offloading behavior are unrelated bugs." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 57, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:33666", + "issue:36010", + "issue:39692", + "issue:43257", + "issue:43278", + "issue:43299", + "issue:43493", + "issue:43540", + "issue:43756", + "issue:43844", + "issue:44060", + "issue:44423", + "issue:44734", + "issue:44964", + "issue:44977", + "issue:45072", + "issue:45198", + "issue:45405" + ], + "soft_pairs": [ + "issue:33666|issue:36010", + "issue:43278|issue:43844", + "issue:43756|issue:44060", + "issue:43257|issue:43299", + "issue:45072|issue:45198", + "issue:43540|issue:44977", + "issue:44423|issue:44734", + "issue:43493|issue:43844", + "issue:44964|issue:45405", + "issue:39692|issue:43493" + ], + "prepared_review_unit_hash": "957b55e6071b196bc92bdca9cf237aa161816c14d284befa1c28e2448f769844", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 
336448 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13533, + "estimated_input_tokens": 3384, + "estimated_eval_tokens": 7024 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These items are not duplicates of one another; they span unrelated bugs across different models and subsystems (multimodal training, loading/import failures, documentation examples, MoE loading, dtype mismatches, serving crashes, and release/versioning). No PRs are present in the cluster.", + "confidence": 0.98, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:33666", + "right": "issue:36010", + "accept": false, + "reason": "Qwen2-VL multi-GPU training and a GenerationMixin import error are different failure modes in different paths." + }, + { + "left": "issue:43278", + "right": "issue:43844", + "accept": false, + "reason": "One is an embedding dtype regression during eval; the other is a gradient explosion with HfDeepSpeedConfig + ZeRO-3." + }, + { + "left": "issue:43756", + "right": "issue:44060", + "accept": false, + "reason": "Smollm3 RoPE-layer mismatch and Qwen3-Next tied-weights warning are unrelated architecture/configuration bugs." + }, + { + "left": "issue:43257", + "right": "issue:43299", + "accept": false, + "reason": "Both mention Qwen3/Qwen3VL MoE loading, but one is an accelerate+deepspeed conversion issue and the other is a transformers version regression; not the same concrete bug." + }, + { + "left": "issue:45072", + "right": "issue:45198", + "accept": false, + "reason": "CI dtype mismatches for SwitchTransformers/TimmWrapperModel and Wav2Vec2 save/tokenization failures are distinct issues." 
+ }, + { + "left": "issue:43540", + "right": "issue:44977", + "accept": false, + "reason": "Qwen3OmniMoe video-input ValueError and Qwen3.5 flash-attention generation failure affect different code paths and symptoms." + }, + { + "left": "issue:44423", + "right": "issue:44734", + "accept": false, + "reason": "Both are serve-related crashes, but one is multimodal continuous batching with a string `.to()` error and the other is KV-cache continuation indexing; different bugs." + }, + { + "left": "issue:43493", + "right": "issue:43844", + "accept": false, + "reason": "SigLIP2 JAX/HF discrepancy is a model-implementation mismatch, not the ZeRO-3 gradient amplification bug." + }, + { + "left": "issue:44964", + "right": "issue:45405", + "accept": false, + "reason": "Loading Phi-4-multimodal-instruct with latest transformers and a bumped MIN_PEFT_VERSION release issue are unrelated." + }, + { + "left": "issue:39692", + "right": "issue:43493", + "accept": false, + "reason": "Both concern SigLIP2, but one is a docs example error/quantization failure and the other is a core HF vs JAX implementation discrepancy; not mergeable as one fix." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 58, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:33357", + "issue:33666", + "issue:36331", + "issue:43329", + "issue:43572", + "issue:43606", + "issue:43646", + "issue:43688", + "issue:43746", + "issue:43824", + "issue:43994", + "issue:44291", + "issue:44466", + "issue:44493", + "issue:44521", + "issue:44589", + "issue:45290" + ], + "soft_pairs": [ + "issue:33666|issue:36331", + "issue:43824|issue:44589", + "issue:44521|issue:45290", + "issue:43572|issue:44291", + "issue:43688|issue:43746", + "issue:43646|issue:44466", + "issue:43994|issue:44493", + "issue:43329|issue:43606", + "issue:33357|issue:33666" + ], + "prepared_review_unit_hash": "b8c1c2d4cd4246c29b6e8931db0375d159caa625c569404fb8a77769d29e3fdc", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12735, + "estimated_input_tokens": 3184, + "estimated_eval_tokens": 6624 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These items are mostly unrelated issues across different models and code paths; none of the proposed soft edges looks like a true duplicate pair.", + "confidence": 0.96, + "canonical_issue_reason": "No single canonical issue: the set spans distinct bugs in loading, training, tokenization, config, and 
serialization across different subsystems/models.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:45290 is the clearest self-contained report with a narrow scope and specific crash condition, so it is the best representative issue if one must be chosen.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:33666", + "right": "issue:36331", + "accept": false, + "reason": "Different problems: multi-GPU training support for Qwen2-VL vs a Trainer API signature break in compute_loss." + }, + { + "left": "issue:43824", + "right": "issue:44589", + "accept": false, + "reason": "Unrelated failures: an import error for Qwen2.5-VL vs a Float8 storage lookup/type error." + }, + { + "left": "issue:44521", + "right": "issue:45290", + "accept": false, + "reason": "Both involve apply_chat_template, but the bugs differ: all-zero assistant masks for multimodal inputs vs a crash on tool-call assistant messages with no content." + }, + { + "left": "issue:43572", + "right": "issue:44291", + "accept": false, + "reason": "Both touch v5 migration, but one is a missing config field and the other is an init_empty_weights argument mismatch; different code paths and fixes." + }, + { + "left": "issue:43688", + "right": "issue:43746", + "accept": false, + "reason": "Different models and behaviors: auxiliary-loss normalization vs PEFT adapter checkpoint loading." + }, + { + "left": "issue:43646", + "right": "issue:44466", + "accept": false, + "reason": "Not the same bug: custom model initialization breakage vs inconsistent lm_head serialization depending on device." + }, + { + "left": "issue:43994", + "right": "issue:44493", + "accept": false, + "reason": "Different symptoms and likely causes: nonsensical AutoModel/pipeline output for SigLIP2 vs unexpected position-id key warnings." 
+ }, + { + "left": "issue:43329", + "right": "issue:43606", + "accept": false, + "reason": "Separate areas entirely: multimodal video token counting bug vs CPU offload device mismatch in bark-small." + }, + { + "left": "issue:33357", + "right": "issue:33666", + "accept": false, + "reason": "No overlap beyond being model-related: MacOS bus error in CLIP vs Qwen2-VL multi-GPU training." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 59, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:15354", + "issue:38175", + "issue:39401", + "issue:39692", + "issue:43232", + "issue:43257", + "issue:43278", + "issue:43295", + "issue:43540", + "issue:43575", + "issue:43701", + "issue:44315", + "issue:44568", + "issue:44792", + "issue:44849", + "issue:45200", + "issue:45405" + ], + "soft_pairs": [ + "issue:39692|issue:43295", + "issue:39401|issue:44568", + "issue:43575|issue:45405", + "issue:43232|issue:44849", + "issue:44792|issue:45405", + "issue:43257|issue:45405", + "issue:15354|issue:38175", + "issue:43540|issue:45200", + "issue:43278|issue:43701", + "issue:44315|issue:44849" + ], + "prepared_review_unit_hash": "73db7b1b469230daeae337272332cf26467d26098e0738054986610cdace0844", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 10, + "serialized_chars": 12829, + "estimated_input_tokens": 3208, + "estimated_eval_tokens": 6672 + }, + "overflow_policy": "truncate_then_skip", 
+ "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is highly heterogeneous: the issues cover unrelated models, tokenizer behavior, generation internals, training/resume mechanics, and a release/versioning problem. There is no clear duplicate set or shared underlying bug across the soft pairs.", + "confidence": 0.98, + "canonical_issue_reason": "No single canonical issue fits this cluster; the items are unrelated enough that deduplication would be incorrect.", + "canonical_pr_reason": null, + "best_issue_reason": "No strong best issue exists. If forced to pick the broadest regression-style report, issue:43295 is the most generic, but it is not representative of the rest of the cluster.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:39692", + "right": "issue:43295", + "accept": false, + "reason": "SigLIP2 docs-example failures vs a regression in custom model code using processor.tokenizer; different symptoms and likely different fixes." + }, + { + "left": "issue:39401", + "right": "issue:44568", + "accept": false, + "reason": "Qwen3 tokenizer offset_mapping bug is unrelated to mdeberta-v3 add_special_tokens/BOS/EOS behavior." + }, + { + "left": "issue:43575", + "right": "issue:45405", + "accept": false, + "reason": "TP OOM when loading Qwen2-57B-A14B-Instruct is unrelated to the PEFT version bump / unreleased dependency issue." + }, + { + "left": "issue:43232", + "right": "issue:44849", + "accept": false, + "reason": "Generation state sync bug (_update_model_kwargs_for_generation after sync_gpus) is unrelated to Qwen3.5 output_hidden_states handling." + }, + { + "left": "issue:44792", + "right": "issue:45405", + "accept": false, + "reason": "Janus test failure on image generation is a model-specific test issue, not the PEFT version mismatch problem." 
+ }, + { + "left": "issue:43257", + "right": "issue:45405", + "accept": false, + "reason": "Qwen3 MOE weight conversion with accelerate+deepspeed is unrelated to the MIN_PEFT_VERSION release/versioning issue." + }, + { + "left": "issue:15354", + "right": "issue:38175", + "accept": false, + "reason": "torch.jit.script generator-expression export failure is unrelated to unexpected zero probabilities in SigLIP2." + }, + { + "left": "issue:43540", + "right": "issue:45200", + "accept": false, + "reason": "Video-input validation in Qwen3OmniMoe is unrelated to Gemma 4 mm_token_type_ids defaulting for text-only fine-tuning." + }, + { + "left": "issue:43278", + "right": "issue:43701", + "accept": false, + "reason": "Embedding dtype drift BF16->FP32 during evaluate is unrelated to resume_from_checkpoint key mismatch." + }, + { + "left": "issue:44315", + "right": "issue:44849", + "accept": false, + "reason": "Liger Kernel not applied with model_init is a training setup bug, not the same as Qwen3.5 output_hidden_states=True behavior." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 60, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:37428", + "issue:43334", + "issue:43525", + "issue:43582", + "issue:43606", + "issue:43824", + "issue:43844", + "issue:43957", + "issue:44112", + "issue:44442", + "issue:44554", + "issue:44964", + "issue:44977", + "issue:45198", + "issue:45200", + "issue:45325", + "issue:45356" + ], + "soft_pairs": [ + "issue:43582|issue:44977", + "issue:43844|issue:44964", + "issue:37428|issue:43957", + "issue:43606|issue:44112", + "issue:45200|issue:45325", + "issue:43334|issue:43525", + "issue:44554|issue:45356", + "issue:44442|issue:45198", + "issue:37428|issue:43824" + ], + "prepared_review_unit_hash": "d5a6dec165a6c27d88d0638ec857e2120b37190ad6f702d3b6ac079ed622809f", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12769, + "estimated_input_tokens": 3193, + "estimated_eval_tokens": 6642 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "Only one soft duplicate pair stands out: the two pad_token_id loading failures look like the same underlying config regression. 
The rest are unrelated bugs across different models/subsystems and should not be merged.", + "confidence": 0.69, + "canonical_issue_reason": "issue:43334 is the clearest representative of the shared pad_token_id AttributeError regression; issue:43525 looks like a sibling manifestation on another model config.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:43334 is the best single issue to track the duplicate pad_token_id loading failure because it states the failure mode most directly.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43582", + "right": "issue:44977", + "accept": false, + "reason": "Different bugs: Apple Silicon allocator warmup TypeError vs Qwen3.5 flash-attention generation behavior." + }, + { + "left": "issue:43844", + "right": "issue:44964", + "accept": false, + "reason": "Different failure modes: ZeRO-3 gradient anomaly vs latest-transformers model loading error." + }, + { + "left": "issue:37428", + "right": "issue:43957", + "accept": false, + "reason": "Both are loading-related, but one is an import error in flash-attention utils and the other is a meta-device compatibility bug; not the same bug." + }, + { + "left": "issue:43606", + "right": "issue:44112", + "accept": false, + "reason": "Different scope and root cause: CPU offload device mismatch vs a stale GraniteSpeech CI test." + }, + { + "left": "issue:45200", + "right": "issue:45325", + "accept": false, + "reason": "Both involve multimodal models, but the bugs are different: missing mm_token_type_ids default vs incorrect rope/position scaling." + }, + { + "left": "issue:43334", + "right": "issue:43525", + "accept": true, + "reason": "Both report the same pad_token_id AttributeError during model loading, likely the same config regression across different model classes." 
+ }, + { + "left": "issue:44554", + "right": "issue:45356", + "accept": false, + "reason": "Unrelated issues: MPS attention correctness vs tokenizer codec handling/warning regression." + }, + { + "left": "issue:44442", + "right": "issue:45198", + "accept": false, + "reason": "Both touch tokenizers, but one is AutoTokenizer loading and the other is save_pretrained/tokenization for a specific model; not the same bug." + }, + { + "left": "issue:37428", + "right": "issue:43824", + "accept": false, + "reason": "Different import errors involving different symbols and code paths; no clear shared underlying bug." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 61, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:40990", + "issue:43295", + "issue:43388", + "issue:43493", + "issue:43526", + "issue:43572", + "issue:43701", + "issue:43873", + "issue:44464", + "issue:44466", + "issue:44589", + "issue:44610", + "issue:44625", + "issue:44898", + "issue:44987", + "issue:45127", + "issue:45412" + ], + "soft_pairs": [ + "issue:44898|issue:45412", + "issue:44466|issue:45127", + "issue:43388|issue:43526", + "issue:43295|issue:44610", + "issue:43493|issue:43701", + "issue:40990|issue:44625", + "issue:43572|issue:44987", + "issue:43572|issue:44589", + "issue:43873|issue:44464" + ], + "prepared_review_unit_hash": "f5f9e7ff4e7fdd65972caef0a23858dee352345599c3643a4bc66ab93b4f4ea8", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + 
"node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12678, + "estimated_input_tokens": 3170, + "estimated_eval_tokens": 6596 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These issues are not duplicates of one another; they span unrelated bugs across model loading, processors, training utilities, serialization, and memory behavior. No PRs are present.", + "confidence": 0.93, + "canonical_issue_reason": "No single issue is a good canonical representative because the cluster is heterogeneous rather than one underlying bug.", + "canonical_pr_reason": null, + "best_issue_reason": "None of the issues stands out as a cluster anchor for duplicate triage; they do not share a concrete code path or symptom family.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44898", + "right": "issue:45412", + "accept": false, + "reason": "Different problems: one is an image-resolution mismatch in Perceiver classification, the other is RT-DETR memory not being released after deletion." + }, + { + "left": "issue:44466", + "right": "issue:45127", + "accept": false, + "reason": "Unrelated bugs: inconsistent lm_head serialization vs LoRA merge collapse after extending vocabulary; same broad area of model weights, but not the same failure mode or fix." + }, + { + "left": "issue:43388", + "right": "issue:43526", + "accept": false, + "reason": "Both involve labels, but one is gather_for_metrics dropping tuple elements in the last batch while the other is BeitImageProcessorFast reduce_labels returning only one label; distinct code paths." + }, + { + "left": "issue:43295", + "right": "issue:44610", + "accept": false, + "reason": "Processor/tokenizer regression with images is unrelated to OmDet-Turbo producing the wrong input size; different components and symptoms." 
+ }, + { + "left": "issue:43493", + "right": "issue:43701", + "accept": false, + "reason": "SigLIP2 implementation discrepancy is not the same as a resume_from_checkpoint key mismatch; no shared underlying bug." + }, + { + "left": "issue:40990", + "right": "issue:44625", + "accept": false, + "reason": "Extreme perplexity on a specific model/dataset is unrelated to Qwen3.5 num_labels propagation between configs." + }, + { + "left": "issue:43572", + "right": "issue:44987", + "accept": false, + "reason": "Both mention loading/configuration, but StableLmConfig missing pad_token_idx and transformers>=5.1.0 failing on physical-intelligence/fast are different issues." + }, + { + "left": "issue:43572", + "right": "issue:44589", + "accept": false, + "reason": "StableLmConfig pad_token_idx regression and Float8 storage lookup failure are separate bugs in different subsystems." + }, + { + "left": "issue:43873", + "right": "issue:44464", + "accept": false, + "reason": "Quantization/offloading behavior and chunked generation with compiled forward are not the same code-path problem." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 62, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:38175", + "issue:42915", + "issue:43819", + "issue:43824", + "issue:43844", + "issue:43975", + "issue:44360", + "issue:44514", + "issue:44521", + "issue:44561", + "issue:44625", + "issue:44849", + "issue:44871", + "issue:44936", + "issue:45003", + "issue:45083", + "issue:45412" + ], + "soft_pairs": [ + "issue:43844|issue:44936", + "issue:44514|issue:44521", + "issue:44625|issue:44871", + "issue:43824|issue:43975", + "issue:44360|issue:44561", + "issue:38175|issue:45412", + "issue:44849|issue:45003", + "issue:43819|issue:45083", + "issue:42915|issue:43824" + ], + "prepared_review_unit_hash": "ccc5b0eea96adcf876a6eda5b369e9820213966041276027401b5b16709ac7d3", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12483, + "estimated_input_tokens": 3121, + "estimated_eval_tokens": 6498 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The items are a heterogeneous mix of unrelated model-specific bugs and regressions. 
None of the soft pairs look like the same underlying issue, so I would not merge any of them as duplicates.", + "confidence": 0.94, + "canonical_issue_reason": "Issue 44514 is the clearest, most concrete bug report in the set and is the closest thematic anchor to another item in the cluster, but the cluster overall is too mixed to have a strong true canonical issue.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 44514 has a specific failure mode, a narrow API surface (`Qwen2_5_VLProcessor.apply_chat_template`), and reproducible runtime behavior, so it is the strongest representative issue even though it is not a duplicate hub for the rest of the set.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43844", + "right": "issue:44936", + "accept": false, + "reason": "Different failure modes: ZeRO-3 gradient explosion during training vs. `trainer.evaluate()` breaking after `trainer.train()`. Same library area, but not the same bug." + }, + { + "left": "issue:44514", + "right": "issue:44521", + "accept": false, + "reason": "Both involve `apply_chat_template`, but one is a batched-input crash with `padding=False` and the other is all-zero assistant masks for multimodal inputs. Related area, distinct underlying defects." + }, + { + "left": "issue:44625", + "right": "issue:44871", + "accept": false, + "reason": "Config propagation for Qwen3.5 `num_labels` is unrelated to Gemma-3 EOS token configuration mismatch." + }, + { + "left": "issue:43824", + "right": "issue:43975", + "accept": false, + "reason": "An import error for a Qwen2.5-VL class is unrelated to DeepSeek Coder detokenization in v5." + }, + { + "left": "issue:44360", + "right": "issue:44561", + "accept": false, + "reason": "A missing ReLU in the DSA indexer and the removal of `is_torch_fx_available` breaking `trust_remote_code` models are unrelated regressions." 
+ }, + { + "left": "issue:38175", + "right": "issue:45412", + "accept": false, + "reason": "Unexpected zero probabilities in SigLIP2 is unrelated to RT-DETR memory not being released after deletion." + }, + { + "left": "issue:44849", + "right": "issue:45003", + "accept": false, + "reason": "Qwen3.5 hidden-state handling and unsafe `sys.modules[]` access are different code paths and different bug classes." + }, + { + "left": "issue:43819", + "right": "issue:45083", + "accept": false, + "reason": "DAC latent/STE mismatch is unrelated to `_get_feat_extract_output_lengths` behavior in qwen3_omni_moe." + }, + { + "left": "issue:42915", + "right": "issue:43824", + "accept": false, + "reason": "FineGrainedFP8Config failure in Qwen3Moe is unrelated to the Qwen2.5-VL import error." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 63, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:38175", + "issue:43278", + "issue:43295", + "issue:43450", + "issue:43493", + "issue:43575", + "issue:43606", + "issue:43827", + "issue:43856", + "issue:43873", + "issue:43901", + "issue:43994", + "issue:44464", + "issue:44568", + "issue:44610", + "issue:44857", + "issue:44991", + "issue:45083" + ], + "soft_pairs": [ + "issue:43575|issue:44464", + "issue:43295|issue:43873", + "issue:43278|issue:43856", + "issue:43295|issue:43994", + "issue:43295|issue:43450", + "issue:43827|issue:43901", + "issue:44568|issue:44991", + "issue:43606|issue:45083", + "issue:44610|issue:44857", + "issue:38175|issue:43493" + ], + "prepared_review_unit_hash": "f3a27664077d4da5645cf423964194124b65306b6f7b1ec4c7424d4e0d065c08", + 
"component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13416, + "estimated_input_tokens": 3354, + "estimated_eval_tokens": 6964 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These soft pairs are mostly superficial similarities across different bugs. They span unrelated models, APIs, and failure modes, so none of the candidate pairs should be merged as duplicates.", + "confidence": 0.95, + "canonical_issue_reason": "No single canonical issue stands out: the items cover unrelated regressions, model-specific bugs, and docs issues, and none of the soft pairs match the same concrete underlying defect.", + "canonical_pr_reason": null, + "best_issue_reason": "No global best issue for the cluster; there is no cohesive theme strong enough to treat one issue as the representative duplicate target.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43575", + "right": "issue:44464", + "accept": false, + "reason": "Different problems: Qwen2 TP loading OOM vs chunked generation inconsistency under compiled forward." + }, + { + "left": "issue:43295", + "right": "issue:43873", + "accept": false, + "reason": "Custom processor/tokenizer regression is unrelated to quantization offload behavior." + }, + { + "left": "issue:43278", + "right": "issue:43856", + "accept": false, + "reason": "Embedding dtype drift in evaluate is not the same bug as Qwen3 MoE training memory inefficiency." + }, + { + "left": "issue:43295", + "right": "issue:43994", + "accept": false, + "reason": "Processor.tokenizer regression and SigLIP2 nonsensical outputs are different concrete failures." 
+ }, + { + "left": "issue:43295", + "right": "issue:43450", + "accept": false, + "reason": "Custom tokenizer/image regression is unrelated to batched video processor shape bugs." + }, + { + "left": "issue:43827", + "right": "issue:43901", + "accept": false, + "reason": "Both are docs-related, but they concern different API references and different behavioral mismatches." + }, + { + "left": "issue:44568", + "right": "issue:44991", + "accept": false, + "reason": "Tokenizer special-token handling for one model is distinct from tokenizer loading failure for another model." + }, + { + "left": "issue:43606", + "right": "issue:45083", + "accept": false, + "reason": "CPU offload device mismatch for bark-small is unrelated to a qwen3_omni_moe helper-function behavior issue." + }, + { + "left": "issue:44610", + "right": "issue:44857", + "accept": false, + "reason": "Processor input-size mismatch and float16 AMP crash are different code paths and symptoms." + }, + { + "left": "issue:38175", + "right": "issue:43493", + "accept": false, + "reason": "Both involve SigLIP2, but one reports zero probabilities and the other a HF-vs-JAX implementation discrepancy; not the same concrete bug." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 64, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:37428", + "issue:43425", + "issue:43450", + "issue:43493", + "issue:43653", + "issue:44112", + "issue:44186", + "issue:44351", + "issue:44462", + "issue:44488", + "issue:44561", + "issue:44568", + "issue:44779", + "issue:45127", + "issue:45198", + "issue:45245", + "issue:45290", + "issue:45381" + ], + "soft_pairs": [ + "issue:44462|issue:44561", + "issue:45127|issue:45245", + "issue:44488|issue:44568", + "issue:44561|issue:44779", + "issue:43450|issue:43493", + "issue:37428|issue:43425", + "issue:45290|issue:45381", + "issue:43653|issue:44112", + "issue:37428|issue:44351", + "issue:44186|issue:45198" + ], + "prepared_review_unit_hash": "53095fa9cc5e7179ce459b46bc0f84ec8f64c6dead5be1445c77e15ae7d60c27", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13437, + "estimated_input_tokens": 3360, + "estimated_eval_tokens": 6976 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "No soft-edge pair looks like the same underlying bug/change. 
The items cluster around tokenizer/loading/regression complaints, but each pair points to a different model, API symbol, or failure mode, so they should stay separate.", + "confidence": 0.81, + "canonical_issue_reason": "issue:44462 is the most representative standalone bug report: it describes a concrete, user-visible regression in a core loading path (AutoTokenizer ignoring tokenizer.json from the repository) and is broader than the model-specific tokenizer complaints.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:44462 has the clearest and most generalizable failure mode among the issues, making it the best anchor for this set.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44462", + "right": "issue:44561", + "accept": false, + "reason": "Different failures: tokenizer.json loading regression vs removal of is_torch_fx_available breaking trust_remote_code models. Same ecosystem, not the same bug." + }, + { + "left": "issue:45127", + "right": "issue:45245", + "accept": false, + "reason": "Unrelated problems: LoRA merge/collapsed outputs with extended vocab vs a category-count limit error. No shared code-path or fix." + }, + { + "left": "issue:44488", + "right": "issue:44568", + "accept": false, + "reason": "Both are tokenizer-related, but one is a repo-loading issue for a specific model and the other is add_special_tokens not adding BOS/EOS for mdeberta-v3-base. Different concrete behavior." + }, + { + "left": "issue:44561", + "right": "issue:44779", + "accept": false, + "reason": "Both mention v5 tokenizer breakage, but one is a trust_remote_code import/API regression and the other is Deepseek tokenization output changes. Not the same bug." + }, + { + "left": "issue:43450", + "right": "issue:43493", + "accept": false, + "reason": "Video processor batched-shape bug vs SigLIP2 HF/JAX discrepancy. Different models and different code paths." 
+ }, + { + "left": "issue:37428", + "right": "issue:43425", + "accept": false, + "reason": "Both are compatibility/import issues, but one is a missing flash-attention helper symbol and the other is Torch 2.10 incompatibility. Too broad to merge." + }, + { + "left": "issue:45290", + "right": "issue:45381", + "accept": false, + "reason": "Chat template crash on tool-call assistant messages vs Qwen2.5-VL video vision_position_ids issue. Different subsystems and failure modes." + }, + { + "left": "issue:43653", + "right": "issue:44112", + "accept": false, + "reason": "BigBirdTokenizer special-token registration bug vs a GraniteSpeech CI stale test failure. One is a product bug, the other appears test-specific." + }, + { + "left": "issue:37428", + "right": "issue:44351", + "accept": false, + "reason": "Both are import errors, but for different missing symbols in different modules. No evidence of the same underlying regression." + }, + { + "left": "issue:44186", + "right": "issue:45198", + "accept": false, + "reason": "LayoutLMv2 tokenizer NER/padding crash vs Wav2Vec2 save_pretrained/tokenization failure. Different model families and different breakage points." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 65, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:29942", + "issue:32090", + "issue:39692", + "issue:43262", + "issue:43278", + "issue:43450", + "issue:43526", + "issue:43575", + "issue:43819", + "issue:43825", + "issue:44112", + "issue:44265", + "issue:44448", + "issue:44743", + "issue:44855", + "issue:45083", + "issue:45198" + ], + "soft_pairs": [ + "issue:44448|issue:44743", + "issue:43450|issue:43575", + "issue:39692|issue:43526", + "issue:29942|issue:32090", + "issue:43278|issue:43825", + "issue:44265|issue:45198", + "issue:43819|issue:44112", + "issue:44855|issue:45083", + "issue:43262|issue:43450" + ], + "prepared_review_unit_hash": "82882b49b456771120976dbf3534733c6f955a718091c0449429114f391b1b64", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12730, + "estimated_input_tokens": 3183, + "estimated_eval_tokens": 6622 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is not a duplicate set: the items span unrelated bugs across training, inference, multimodal processors, export, and CI. 
All soft edges look like superficial similarity rather than the same underlying defect.", + "confidence": 0.97, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44448", + "right": "issue:44743", + "accept": false, + "reason": "Different models and failure modes: Pegasus output drift vs Qwen3 recurrent-state reset with cache. No shared code-path bug." + }, + { + "left": "issue:43450", + "right": "issue:43575", + "accept": false, + "reason": "Video batch-shape bug vs Qwen2 TP OOM are unrelated symptoms in different subsystems." + }, + { + "left": "issue:39692", + "right": "issue:43526", + "accept": false, + "reason": "SigLIP2 docs/example errors do not match BeitImageProcessorFast label reduction behavior." + }, + { + "left": "issue:29942", + "right": "issue:32090", + "accept": false, + "reason": "Flash Attention test failures and Trainer _gpu_broadcast_one NoneType error are distinct training/runtime issues." + }, + { + "left": "issue:43278", + "right": "issue:43825", + "accept": false, + "reason": "Embedding dtype mismatch during eval is unrelated to pipeline translation-task error messaging." + }, + { + "left": "issue:44265", + "right": "issue:45198", + "accept": false, + "reason": "torch.export failure with torch_compilable_check is a different problem from Wav2Vec2 save_pretrained/tokenization failures." + }, + { + "left": "issue:43819", + "right": "issue:44112", + "accept": false, + "reason": "DAC latent/forward mismatch is not the same as a stale CI test in GraniteSpeech." + }, + { + "left": "issue:44855", + "right": "issue:45083", + "accept": false, + "reason": "Python 3.13 import parsing error in DebertaV2Model is unrelated to qwen3_omni_moe feature-length logic." 
+ }, + { + "left": "issue:43262", + "right": "issue:43450", + "accept": false, + "reason": "Audio chat-template sampling-rate default bug and video batched-shape bug are different modality-processing issues." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 66, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:38175", + "issue:39692", + "issue:43335", + "issue:43404", + "issue:43450", + "issue:43526", + "issue:43550", + "issue:43611", + "issue:43819", + "issue:43931", + "issue:44448", + "issue:44493", + "issue:44779", + "issue:44977", + "issue:45083", + "issue:45200", + "issue:45229", + "issue:45381" + ], + "soft_pairs": [ + "issue:45200|issue:45381", + "issue:43611|issue:43931", + "issue:43404|issue:45200", + "issue:39692|issue:44493", + "issue:43819|issue:44493", + "issue:44448|issue:44779", + "issue:38175|issue:44493", + "issue:44977|issue:45229", + "issue:43450|issue:43526", + "issue:43550|issue:45083", + "issue:43335|issue:45083" + ], + "prepared_review_unit_hash": "8dadbb5335fdc46c00dc09ce53e56099c9be9bb7bebb7f1ab860377602196df1", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 11, + "serialized_chars": 13728, + "estimated_input_tokens": 3432, + "estimated_eval_tokens": 7120 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These items are mostly 
unrelated bug reports that only share superficial wording around regressions, model loading, or processor behavior. None of the soft pairs look like the same underlying issue, so there is no safe duplicate merge candidate.", + "confidence": 0.94, + "canonical_issue_reason": "No single canonical issue is appropriate: the set spans distinct bugs across different models, processors, and failure modes, so any representative would be arbitrary.", + "canonical_pr_reason": null, + "best_issue_reason": "No issue is a strong global canonical representative; issue:44493 is the broadest wording, but it is still too generic and not actually aligned with the other reports.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:45200", + "right": "issue:45381", + "accept": false, + "reason": "Different models and different tensor fields: Gemma4 mm_token_type_ids handling vs Qwen2.5-VL video vision_position_ids. Not the same bug." + }, + { + "left": "issue:43611", + "right": "issue:43931", + "accept": false, + "reason": "Both concern model loading, but one is a base_model_prefix regression and the other is a specific Qwen3-VL weight-shape mismatch. Different failure modes." + }, + { + "left": "issue:43404", + "right": "issue:45200", + "accept": false, + "reason": "Unrelated model-specific issues: untied lm_head weights in Mistral3 vs missing default mm_token_type_ids in Gemma4." + }, + { + "left": "issue:39692", + "right": "issue:44493", + "accept": false, + "reason": "SigLIP2 doc-example mistakes are not the same as the general position-id warning across many models." + }, + { + "left": "issue:43819", + "right": "issue:44493", + "accept": false, + "reason": "DAC forward/from_latents mismatch is a codec/quantization logic bug, not a position-id loading warning." 
+ }, + { + "left": "issue:44448", + "right": "issue:44779", + "accept": false, + "reason": "Both mention v4/v5 behavior changes, but one is Pegasus generation output and the other is Deepseek tokenizer correctness. Different code paths." + }, + { + "left": "issue:38175", + "right": "issue:44493", + "accept": false, + "reason": "SigLIP2 zero probabilities is a model output issue, not the broad unexpected position-id key issue." + }, + { + "left": "issue:44977", + "right": "issue:45229", + "accept": false, + "reason": "Flash-attention generation failure in Qwen3.5 is unrelated to Gemma4 multi-GPU CUDA OOM." + }, + { + "left": "issue:43450", + "right": "issue:43526", + "accept": false, + "reason": "Both are processor bugs, but one is batched video shape handling and the other is Beit reduce_labels returning only one label. Not the same defect." + }, + { + "left": "issue:43550", + "right": "issue:45083", + "accept": false, + "reason": "Torch.compile/SDPA failure in Bamba-9B-v2 is unrelated to qwen3_omni_moe feature-length helper behavior." + }, + { + "left": "issue:43335", + "right": "issue:45083", + "accept": false, + "reason": "SwitchTransformers sparse-layer config bug and qwen3_omni_moe length helper bug are unrelated subsystems and symptoms." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 67, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:29127", + "issue:32090", + "issue:40444", + "issue:43066", + "issue:43335", + "issue:43526", + "issue:43550", + "issue:43606", + "issue:43653", + "issue:43723", + "issue:44448", + "issue:44462", + "issue:44554", + "issue:44857", + "issue:44977", + "issue:45072", + "issue:45372", + "issue:45406" + ], + "soft_pairs": [ + "issue:29127|issue:32090", + "issue:43335|issue:43550", + "issue:43066|issue:44462", + "issue:40444|issue:43526", + "issue:44857|issue:45072", + "issue:45372|issue:45406", + "issue:44448|issue:44977", + "issue:43723|issue:44554", + "issue:43606|issue:44857", + "issue:43335|issue:43653" + ], + "prepared_review_unit_hash": "9fa8c2f434601f26b2675537f40f24b4fafd402bf9b8d09b92f13797b703d0e9", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13575, + "estimated_input_tokens": 3394, + "estimated_eval_tokens": 7044 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "All proposed soft matches are superficial theme overlaps (tokenizers, precision, Gemma 4, etc.) but each pair points to a different concrete failure path, model, or subsystem. 
None should be merged as duplicates.", + "confidence": 0.97, + "canonical_issue_reason": "No canonical issue: the cluster is a loose set of unrelated bugs rather than one underlying defect.", + "canonical_pr_reason": null, + "best_issue_reason": "No single issue is a safe representative for this cluster because none of the soft pairs share the same concrete code-path failure.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:29127", + "right": "issue:32090", + "accept": false, + "reason": "Different bugs: LayoutLMv3 error messaging vs Trainer/_gpu_broadcast_one NoneType failure." + }, + { + "left": "issue:43335", + "right": "issue:43550", + "accept": false, + "reason": "Different model/code paths: SwitchTransformers config bug vs Bamba torch.compile+SDPA failure." + }, + { + "left": "issue:43066", + "right": "issue:44462", + "accept": false, + "reason": "Both tokenizer-related, but one is decoder type selection and the other is AutoTokenizer ignoring tokenizer.json." + }, + { + "left": "issue:40444", + "right": "issue:43526", + "accept": false, + "reason": "Different areas: Qwen2.5-VL multi-image IterableDataset failure vs BEiT fast image processor label reduction bug." + }, + { + "left": "issue:44857", + "right": "issue:45072", + "accept": false, + "reason": "Both precision-related, but they affect different models and different paths (LwDetr AMP crash vs SwitchTransformers/TimmWrapper bfloat16 mismatch)." + }, + { + "left": "issue:45372", + "right": "issue:45406", + "accept": false, + "reason": "Gemma 4 processor issues, but one is a missing dependency import and the other is a missing _tokenizer attribute in serve; not the same bug." + }, + { + "left": "issue:44448", + "right": "issue:44977", + "accept": false, + "reason": "Different failures in different models: Pegasus output regression vs Qwen3.5 flash-attention generation bug." 
+ }, + { + "left": "issue:43723", + "right": "issue:44554", + "accept": false, + "reason": "Tokenizer loading in v5 is unrelated to an MPS attention correctness issue." + }, + { + "left": "issue:43606", + "right": "issue:44857", + "accept": false, + "reason": "Different bugs: CPU offload device mismatch in bark-small vs float16 AMP crash in LwDetrImageLoss." + }, + { + "left": "issue:43335", + "right": "issue:43653", + "accept": false, + "reason": "Completely different subsystems: SwitchTransformers config vs BigBird tokenizer special-token registration." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 68, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:34689", + "issue:35141", + "issue:37428", + "issue:41720", + "issue:43335", + "issue:43504", + "issue:43550", + "issue:43653", + "issue:43873", + "issue:44112", + "issue:44493", + "issue:44898", + "issue:44987", + "issue:45072", + "issue:45083", + "issue:45198", + "issue:45229" + ], + "soft_pairs": [ + "issue:44898|issue:45198", + "issue:37428|issue:44987", + "issue:43335|issue:43504", + "issue:43335|issue:44112", + "issue:43653|issue:45083", + "issue:43335|issue:45072", + "issue:43873|issue:45229", + "issue:43873|issue:44493", + "issue:43550|issue:44112", + "issue:34689|issue:35141", + "issue:41720|issue:45229" + ], + "prepared_review_unit_hash": "ca41e4c396d8fdfdfc9d4ba9dfaf70d3dbad332eb56ca17c2c08906bffb157d1", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + 
"node_count": 17, + "item_count": 17, + "soft_pair_count": 11, + "serialized_chars": 13227, + "estimated_input_tokens": 3307, + "estimated_eval_tokens": 6870 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is heterogeneous: the soft-similarity pairs point to different bugs across config, tokenizer, loading, quantization, CI, and inference paths. I would not merge any pair as duplicates.", + "confidence": 0.93, + "canonical_issue_reason": "No single canonical issue fits the cluster: the items describe unrelated regressions rather than one underlying bug.", + "canonical_pr_reason": null, + "best_issue_reason": "If a representative issue must be chosen, issue 43873 is the broadest and most central-looking candidate because it is open and has the most inbound activity, but it still should not be treated as the duplicate center.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44898", + "right": "issue:45198", + "accept": false, + "reason": "Different model families and failure modes: Perceiver image-resize interpolation vs Wav2Vec2 save/tokenization." + }, + { + "left": "issue:37428", + "right": "issue:44987", + "accept": false, + "reason": "An import-time missing helper is not the same bug as a model-loading failure for a specific repo." + }, + { + "left": "issue:43335", + "right": "issue:43504", + "accept": false, + "reason": "SwitchTransformers sparse-layer construction and BEiT legacy-field loading are distinct code paths." + }, + { + "left": "issue:43335", + "right": "issue:44112", + "accept": false, + "reason": "A model-config bug is unrelated to a GraniteSpeech CI test about stale device overrides." + }, + { + "left": "issue:43653", + "right": "issue:45083", + "accept": false, + "reason": "Tokenizer special-token registration and feature-extractor length computation are different subsystems." 
+ }, + { + "left": "issue:43335", + "right": "issue:45072", + "accept": false, + "reason": "Sparse-layer creation in SwitchTransformers is unrelated to bfloat16 dtype mismatch failures." + }, + { + "left": "issue:43873", + "right": "issue:45229", + "accept": false, + "reason": "Quantization/offloading behavior and Gemma4 multi-GPU CUDA OOM are not the same underlying issue." + }, + { + "left": "issue:43873", + "right": "issue:44493", + "accept": false, + "reason": "Offloading with quantization does not match the position-id key regression described here." + }, + { + "left": "issue:43550", + "right": "issue:44112", + "accept": false, + "reason": "Bamba torch.compile+SDPA failure and GraniteSpeech CI stale override failure are unrelated." + }, + { + "left": "issue:34689", + "right": "issue:35141", + "accept": false, + "reason": "Llama 3.2 vision loading regression and embedding reinit after resize_embeddings are separate bugs." + }, + { + "left": "issue:41720", + "right": "issue:45229", + "accept": false, + "reason": "Qwen3 auto-device-map cudaErrorAssert and Gemma4 CUDA OOM are different runtime failures." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 69, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:29942", + "issue:32090", + "issue:33357", + "issue:35141", + "issue:43065", + "issue:43278", + "issue:43329", + "issue:43720", + "issue:43976", + "issue:43994", + "issue:44112", + "issue:44442", + "issue:44448", + "issue:44466", + "issue:45072", + "issue:45412", + "issue:45440" + ], + "soft_pairs": [ + "issue:43278|issue:43329", + "issue:43976|issue:45440", + "issue:43720|issue:44112", + "issue:43065|issue:43994", + "issue:29942|issue:35141", + "issue:43065|issue:45412", + "issue:32090|issue:33357", + "issue:44442|issue:45072", + "issue:44448|issue:44466", + "issue:43994|issue:45412" + ], + "prepared_review_unit_hash": "586f02377a5fbf41d63138061d2fccd85f9cb9e9a33fa6eaf22a5755a4935619", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 10, + "serialized_chars": 12985, + "estimated_input_tokens": 3247, + "estimated_eval_tokens": 6750 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is heterogeneous: it mixes unrelated bugs in multimodal processing, dtype/serialization, CI, tokenizer loading, and model-specific regressions. 
None of the soft edges look like true duplicates or the same underlying fix.", + "confidence": 0.94, + "canonical_issue_reason": "Issue 44466 is the most concrete and self-contained regression, but the cluster does not contain a clear duplicate group to canonically represent.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 44466 is the strongest standalone issue from a triage perspective: specific behavior, clear reproduction context, and a well-scoped bug report.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43278", + "right": "issue:43329", + "accept": false, + "reason": "Different bugs: one is training/evaluate dtype drift for embeddings, the other is an undefined-variable error in multimodal token counting for video." + }, + { + "left": "issue:43976", + "right": "issue:45440", + "accept": false, + "reason": "Not the same change: Python version compatibility for Transformers 5.1.0 is unrelated to DeepSeekV3 diverging from the remote implementation." + }, + { + "left": "issue:43720", + "right": "issue:44112", + "accept": false, + "reason": "Both are CI-flavored, but they affect different models and failure modes: BitNet unpacking vs a stale GraniteSpeech device override test." + }, + { + "left": "issue:43065", + "right": "issue:43994", + "accept": false, + "reason": "Different model families and symptoms: dummy Conv2d in Sam3PixelDecoder versus nonsensical outputs from SigLIP2 with AutoModel/pipeline." + }, + { + "left": "issue:29942", + "right": "issue:35141", + "accept": false, + "reason": "Flash Attention test failures are unrelated to token-embedding resizing causing output embeddings to be reinitialized in post_init." + }, + { + "left": "issue:43065", + "right": "issue:45412", + "accept": false, + "reason": "Both mention model internals, but one is about a dummy layer in Sam3PixelDecoder and the other is a memory-release/GC issue in RT-DETR." 
+ }, + { + "left": "issue:32090", + "right": "issue:33357", + "accept": false, + "reason": "Different root causes and subsystems: Trainer GPU broadcast NoneType error versus a MacOS bus error with a community CLIP model." + }, + { + "left": "issue:44442", + "right": "issue:45072", + "accept": false, + "reason": "Tokenizer-loading failure and bfloat16 dtype mismatches are separate problems, not the same code-path bug." + }, + { + "left": "issue:44448", + "right": "issue:44466", + "accept": false, + "reason": "Both involve model output behavior, but one is a Pegasus v4/v5 output discrepancy and the other is serialization of lm_head.weight tied to device placement." + }, + { + "left": "issue:43994", + "right": "issue:45412", + "accept": false, + "reason": "SigLIP2 incorrect inference output is unrelated to RT-DETR memory not being released after deletion." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 70, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:25251", + "issue:32090", + "issue:42915", + "issue:42994", + "issue:43257", + "issue:43278", + "issue:43381", + "issue:43653", + "issue:43756", + "issue:44206", + "issue:44442", + "issue:44479", + "issue:44734", + "issue:44871", + "issue:44977", + "issue:45406", + "issue:45440" + ], + "soft_pairs": [ + "issue:42915|issue:42994", + "issue:44734|issue:45406", + "issue:25251|issue:32090", + "issue:44871|issue:44977", + "issue:43257|issue:43278", + "issue:43653|issue:44442", + "issue:43278|issue:43381", + "issue:44206|issue:44479", + "issue:43756|issue:45440" + ], + "prepared_review_unit_hash": 
"7130dde198022493c50f9e35be68f5d9400059cd75ae7ebe4570cce0dc9cd1ac", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12557, + "estimated_input_tokens": 3140, + "estimated_eval_tokens": 6536 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These items do not form a real duplicate cluster; they span unrelated bugs in tokenizers, serving, model configs, quantization, and training/runtime behavior. All soft-similarity pairs should be rejected.", + "confidence": 0.93, + "canonical_issue_reason": "Issue 45440 is the strongest standalone report in the set: it is a concrete, user-facing model divergence bug with clear scope and active references. If one issue must anchor this otherwise unrelated group, it is the most representative.", + "canonical_pr_reason": null, + "best_issue_reason": "45440 is the best issue candidate overall because it describes a specific, actionable correctness problem rather than a broad symptom or model-specific one-off, and it has the clearest potential for an eventual fix.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:42915", + "right": "issue:42994", + "accept": false, + "reason": "Different failures: FineGrainedFP8Config incompatibility for Qwen3Moe vs quantized model saving. Same quantization theme, but not the same code path or fix." + }, + { + "left": "issue:44734", + "right": "issue:45406", + "accept": false, + "reason": "Serving KV-cache indexing crash and Gemma4Processor missing _tokenizer are unrelated subsystems and failure modes." 
+ }, + { + "left": "issue:25251", + "right": "issue:32090", + "accept": false, + "reason": "Top-k pipeline output shape regression is unrelated to Trainer broadcasting a NoneType into _gpu_broadcast_one." + }, + { + "left": "issue:44871", + "right": "issue:44977", + "accept": false, + "reason": "Different models and different bugs: eos_token_id config mismatch vs flash-attention generation regression." + }, + { + "left": "issue:43257", + "right": "issue:43278", + "accept": false, + "reason": "Both mention model state handling, but one is MoE weight conversion with accelerate+deepspeed and the other is embedding dtype drift during evaluate; not the same underlying bug." + }, + { + "left": "issue:43653", + "right": "issue:44442", + "accept": false, + "reason": "Both concern tokenizers, but one is a special-token registration bug causing empty decode, while the other is an AutoTokenizer loading failure for a different tokenizer class." + }, + { + "left": "issue:43278", + "right": "issue:43381", + "accept": false, + "reason": "Eval-mode behavior is the only overlap; dtype changes in evaluation and gradient checkpointing restrictions are distinct issues." + }, + { + "left": "issue:44206", + "right": "issue:44479", + "accept": false, + "reason": "Unsupported feature argument in an audio feature extractor is unrelated to the video-input regression affecting Qwen multimodal models." + }, + { + "left": "issue:43756", + "right": "issue:45440", + "accept": false, + "reason": "Both are about model implementation mismatches, but they affect different architectures and different mechanisms; they are not the same concrete bug." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 71, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:29127", + "issue:29942", + "issue:30064", + "issue:32090", + "issue:33290", + "issue:33357", + "issue:34567", + "issue:39692", + "issue:43334", + "issue:43531", + "issue:43653", + "issue:43824", + "issue:44279", + "issue:44610", + "issue:44743", + "issue:45103", + "issue:45372", + "issue:45412" + ], + "soft_pairs": [ + "issue:45103|issue:45372", + "issue:43824|issue:44279", + "issue:43653|issue:44743", + "issue:44610|issue:45412", + "issue:29942|issue:30064", + "issue:33357|issue:39692", + "issue:29127|issue:34567", + "issue:43334|issue:43531", + "issue:32090|issue:33290" + ], + "prepared_review_unit_hash": "382886945bceba1fbe828cd8f3a5d99e2bca70c1aea83d9576c95195c71cd1c4", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 9, + "serialized_chars": 13218, + "estimated_input_tokens": 3305, + "estimated_eval_tokens": 6866 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The items are a mixed bag of unrelated issues across different models, trainers, and processors. 
None of the soft pairs look like the same underlying bug, so there is no single canonical representative for this cluster.", + "confidence": 0.97, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:45103", + "right": "issue:45372", + "accept": false, + "reason": "Both are import/runtime failures, but they involve different components and different root causes: auto_docstring annotation handling vs Gemma 4 processor loading." + }, + { + "left": "issue:43824", + "right": "issue:44279", + "accept": false, + "reason": "43824 is a specific missing export for Qwen2_5_VLForConditionalGeneration; 44279 is a generic dependency issue without the same concrete failure mode." + }, + { + "left": "issue:43653", + "right": "issue:44743", + "accept": false, + "reason": "BigBird tokenizer special-token decoding and Qwen3 recurrent-state reset are unrelated code paths and bugs." + }, + { + "left": "issue:44610", + "right": "issue:45412", + "accept": false, + "reason": "Processor input-size mismatch for OmDet-Turbo and RT-DETR memory not being released are different problems with different fixes." + }, + { + "left": "issue:29942", + "right": "issue:30064", + "accept": false, + "reason": "Flash Attention 2 test failures and void segmentation map processing are unrelated issues." + }, + { + "left": "issue:33357", + "right": "issue:39692", + "accept": false, + "reason": "Both touch vision-model workflows, but one is a MacOS bus error for a CLIP model and the other is a SigLIP2 documentation/example problem; not the same bug." + }, + { + "left": "issue:29127", + "right": "issue:34567", + "accept": false, + "reason": "LayoutLMv3 box-validation messaging and TrainerState token-count tracking are completely different areas." 
+ }, + { + "left": "issue:43334", + "right": "issue:43531", + "accept": false, + "reason": "Both mention Qwen3, but one is a missing pad_token_id import/load failure for Qwen3-VL and the other is a sliding_window bug for Qwen3-MoE; different concrete problems." + }, + { + "left": "issue:32090", + "right": "issue:33290", + "accept": false, + "reason": "Trainer NoneType broadcast error and DeepSpeed Adafactor OOM are both training-related, but they are distinct failure modes with different underlying causes." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 72, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:29942", + "issue:36010", + "issue:43232", + "issue:43262", + "issue:43381", + "issue:43388", + "issue:43504", + "issue:43526", + "issue:43701", + "issue:44186", + "issue:44206", + "issue:44387", + "issue:44442", + "issue:44857", + "issue:44898", + "issue:44977", + "issue:45229", + "issue:45245" + ], + "soft_pairs": [ + "issue:43504|issue:44857", + "issue:44977|issue:45245", + "issue:44387|issue:45229", + "issue:43526|issue:44186", + "issue:43262|issue:44206", + "issue:43381|issue:43701", + "issue:44442|issue:44898", + "issue:29942|issue:36010", + "issue:43232|issue:43388" + ], + "prepared_review_unit_hash": "ec70473b353b125658545d7fedf7dba05466f60cfcb780ca146f1806b9d883bb", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 9, + "serialized_chars": 13089, + 
"estimated_input_tokens": 3273, + "estimated_eval_tokens": 6802 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is heterogeneous: the items are mostly unrelated bugs across generation, vision, audio, training, and memory/OOM paths. None of the soft pairs look like true duplicates, so there is no strong duplicate-canonical issue; issue 44387 is only the best representative by discussion activity and broad impact.", + "confidence": 0.22, + "canonical_issue_reason": "Issue 44387 is the closest thing to a representative item because it has the highest discussion activity and a broad CUDA/memory symptom, but it does not truly subsume the rest of the cluster.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 44387 is the best overall representative for routing/triage, but only as a loose cluster anchor rather than a duplicate target.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43504", + "right": "issue:44857", + "accept": false, + "reason": "Different bug classes: pretrained preset loading/legacy field vs float16 AMP loss crash on CUDA. No shared code path." + }, + { + "left": "issue:44977", + "right": "issue:45245", + "accept": false, + "reason": "Flash-attention generation bug vs category-count limit error. These are unrelated failures in different subsystems." + }, + { + "left": "issue:44387", + "right": "issue:45229", + "accept": false, + "reason": "Both mention CUDA/OOM, but one is int4 quantization reserved-memory growth and the other is Gemma4 multi-GPU inference OOM. Too different to merge." + }, + { + "left": "issue:43526", + "right": "issue:44186", + "accept": false, + "reason": "BeitImageProcessorFast label reduction bug vs LayoutLMv2Tokenizer crash on NER/padding. Different components and failure modes." 
+ }, + { + "left": "issue:43262", + "right": "issue:44206", + "accept": false, + "reason": "Audio chat-template sampling-rate default vs unsupported center arg in feature extraction. Not the same underlying bug." + }, + { + "left": "issue:43381", + "right": "issue:43701", + "accept": false, + "reason": "Gradient-checkpointing eval-mode restriction and resume-from-checkpoint key mismatch are unrelated training/checkpoint issues." + }, + { + "left": "issue:44442", + "right": "issue:44898", + "accept": false, + "reason": "AutoTokenizer loading failure and Perceiver non-default-resolution failure are different model/input-path bugs." + }, + { + "left": "issue:29942", + "right": "issue:36010", + "accept": false, + "reason": "Flash Attention 2 test failures vs GenerationMixin import error. Similar packaging area, but not the same concrete problem." + }, + { + "left": "issue:43232", + "right": "issue:43388", + "accept": false, + "reason": "Generation kwargs update after sync_gpus vs gather_for_metrics dropping last-batch labels. Different code paths and fixes." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 73, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:29127", + "issue:33357", + "issue:35141", + "issue:42915", + "issue:43526", + "issue:43550", + "issue:43606", + "issue:43701", + "issue:43761", + "issue:43931", + "issue:43994", + "issue:44188", + "issue:44479", + "issue:44857", + "issue:45003", + "issue:45290", + "issue:45440" + ], + "soft_pairs": [ + "issue:29127|issue:33357", + "issue:43550|issue:43701", + "issue:43526|issue:44857", + "issue:43606|issue:43701", + "issue:42915|issue:43931", + "issue:29127|issue:35141", + "issue:43526|issue:43994", + "issue:45003|issue:45440", + "issue:44479|issue:45290", + "issue:43761|issue:44188" + ], + "prepared_review_unit_hash": "c7ab324624448528ce0eb89d5ac86f3cc0db8fa3d0c1dcb2b78544afbdaf9b4e", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 10, + "serialized_chars": 12896, + "estimated_input_tokens": 3224, + "estimated_eval_tokens": 6704 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is heterogeneous: the issues span unrelated regressions in OCR/layout, vision models, multimodal chat templates, compilation, checkpointing, and runtime safety. 
None of the soft-similar pairs look like the same underlying bug or a mergeable PR-equivalent fix.", + "confidence": 0.94, + "canonical_issue_reason": "Issue 45003 is the broadest and most general runtime-safety report in the set, and it is still open; if one issue must anchor the cluster, it is the least model-specific. That said, the cluster does not contain a true duplicate set.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 45003 is the best representative because it is open, broadly scoped, and not tied to one narrow model or regression path. The other issues are more specific one-off failures and do not unify into a single concrete bug.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:29127", + "right": "issue:33357", + "accept": false, + "reason": "Different subsystems and failures: LayoutLMv3 box-validation messaging vs CLIP community model bus error on macOS." + }, + { + "left": "issue:43550", + "right": "issue:43701", + "accept": false, + "reason": "Bamba torch.compile/SDPA failure is unrelated to resume_from_checkpoint key-mapping mismatch." + }, + { + "left": "issue:43526", + "right": "issue:44857", + "accept": false, + "reason": "BeitImageProcessorFast label reduction bug is unrelated to LwDetrImageLoss AMP/CUDA crash." + }, + { + "left": "issue:43606", + "right": "issue:43701", + "accept": false, + "reason": "CPU offload device mismatch in bark-small is unrelated to checkpoint key mismatch on resume." + }, + { + "left": "issue:42915", + "right": "issue:43931", + "accept": false, + "reason": "Both mention Qwen, but the concrete bugs differ: FineGrainedFP8Config failure vs weight-shape mismatch during model loading." + }, + { + "left": "issue:29127", + "right": "issue:35141", + "accept": false, + "reason": "LayoutLMv3 error-message clarity and token-embedding reinit behavior are unrelated code paths." 
+ }, + { + "left": "issue:43526", + "right": "issue:43994", + "accept": false, + "reason": "Image processor reduce_labels bug does not match a Siglip2 AutoModel/pipeline output-quality regression." + }, + { + "left": "issue:45003", + "right": "issue:45440", + "accept": false, + "reason": "sys.modules access safety issue is different from DeepseekV3MoE implementation divergence; too broad to merge." + }, + { + "left": "issue:44479", + "right": "issue:45290", + "accept": false, + "reason": "Video-input regression for Qwen VL models is unrelated to chat template crashes on tool-call assistant messages." + }, + { + "left": "issue:43761", + "right": "issue:44188", + "accept": false, + "reason": "CLIPVision hidden_states regression and torch.compile attention-kernel divergence are different failure modes and code paths." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 74, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:41628", + "issue:43316", + "issue:43525", + "issue:43575", + "issue:43646", + "issue:43673", + "issue:43701", + "issue:43931", + "issue:43994", + "issue:44077", + "issue:44206", + "issue:44387", + "issue:44743", + "issue:44811", + "issue:44964", + "issue:45216", + "issue:45372", + "issue:45440" + ], + "soft_pairs": [ + "issue:43316|issue:43525", + "issue:44743|issue:44811", + "issue:44964|issue:45440", + "issue:43994|issue:44206", + "issue:43931|issue:45216", + "issue:43673|issue:44743", + "issue:43646|issue:44077", + "issue:43575|issue:43701", + "issue:43575|issue:44387", + "issue:41628|issue:45372" + ], + "prepared_review_unit_hash": 
"c50a167359d60a583314302e3628625a7f7245c99bbb340372cf133cd2cbef83", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13408, + "estimated_input_tokens": 3352, + "estimated_eval_tokens": 6960 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly a similarity-only grouping of unrelated issue reports across different models, APIs, and regressions. None of the soft pairs look like the same underlying bug/change, so they should stay separate.", + "confidence": 0.91, + "canonical_issue_reason": "If one issue must represent the cluster, issue 43646 is the broadest umbrella-style regression report, but it is still too generic to truly subsume the others.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 43646 is the most generally phrased and could serve as a loose representative, though the cluster is not cohesive enough to treat it as a true canonical duplicate.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43316", + "right": "issue:43525", + "accept": false, + "reason": "Both are config/API complaints, but they target different models and different missing attributes; not the same bug." + }, + { + "left": "issue:44743", + "right": "issue:44811", + "accept": false, + "reason": "Different failure modes in different subsystems: recurrent cache reset vs Whisper decoding parameter handling." + }, + { + "left": "issue:44964", + "right": "issue:45440", + "accept": false, + "reason": "Different models and different problems: a loading failure for Phi-4 multimodal vs a DeepSeekV3 implementation divergence." 
+ }, + { + "left": "issue:43994", + "right": "issue:44206", + "accept": false, + "reason": "Both involve vision/processor paths, but one is bad inference output and the other is a crash from an unsupported argument; not the same code-path issue." + }, + { + "left": "issue:43931", + "right": "issue:45216", + "accept": false, + "reason": "Both mention Qwen checkpoints, but one is a weight-shape mismatch on load and the other is a save_pretrained regression." + }, + { + "left": "issue:43673", + "right": "issue:44743", + "accept": false, + "reason": "Cache-related wording overlaps, but the affected models and concrete defects differ." + }, + { + "left": "issue:43646", + "right": "issue:44077", + "accept": false, + "reason": "The first is a broad custom-model-init regression, while the second is a specific PatchTSMixer post_init policy issue." + }, + { + "left": "issue:43575", + "right": "issue:43701", + "accept": false, + "reason": "One is an OOM during tensor-parallel load; the other is a checkpoint key mismatch. Different bugs." + }, + { + "left": "issue:43575", + "right": "issue:44387", + "accept": false, + "reason": "Both mention OOM, but the causes differ materially: tensor parallel loading vs int4 quantization memory growth." + }, + { + "left": "issue:41628", + "right": "issue:45372", + "accept": false, + "reason": "Both are import errors, but they involve different symbols, dependencies, and processor-loading paths." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 75, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:29942", + "issue:33290", + "issue:33666", + "issue:34567", + "issue:34689", + "issue:39692", + "issue:43381", + "issue:43493", + "issue:43611", + "issue:43653", + "issue:43701", + "issue:43827", + "issue:43873", + "issue:44077", + "issue:44206", + "issue:44479", + "issue:44871" + ], + "soft_pairs": [ + "issue:29942|issue:33666", + "issue:43653|issue:43701", + "issue:33290|issue:34567", + "issue:43493|issue:43827", + "issue:34689|issue:43611", + "issue:44077|issue:44479", + "issue:39692|issue:44206", + "issue:43381|issue:43873", + "issue:43873|issue:44871" + ], + "prepared_review_unit_hash": "efbe325ad05b00c916198d2a10b515fbf0f0921db5b7b896b8e194d9fb9443db", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12443, + "estimated_input_tokens": 3111, + "estimated_eval_tokens": 6478 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These issues are thematically broad but not duplicates: they span different models, training/runtime features, docs regressions, and version-specific breakages. 
There is no single underlying bug or change that ties the cluster together, so all soft pairs should be rejected.", + "confidence": 0.97, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:29942", + "right": "issue:33666", + "accept": false, + "reason": "Different areas: Flash Attention 2 test failures vs Qwen2-VL multi-GPU training. No shared code path or concrete bug." + }, + { + "left": "issue:43653", + "right": "issue:43701", + "accept": false, + "reason": "BigBirdTokenizer special-token registration vs resume_from_checkpoint key mismatch are unrelated training/tokenizer problems." + }, + { + "left": "issue:33290", + "right": "issue:34567", + "accept": false, + "reason": "Adafactor + DeepSpeed OOM is a memory/optimizer issue, while num_input_tokens_seen not updating is trainer-state accounting. Different bugs." + }, + { + "left": "issue:43493", + "right": "issue:43827", + "accept": false, + "reason": "SigLIP2 implementation discrepancy is a model-implementation correctness issue; docs still referencing pipeline() is documentation fallout from v5 removals." + }, + { + "left": "issue:34689", + "right": "issue:43611", + "accept": false, + "reason": "Both are load-time regressions, but for different causes: Llama 3.2 Vision breakage vs base_model_prefix handling in v5.0.0." + }, + { + "left": "issue:44077", + "right": "issue:44479", + "accept": false, + "reason": "PatchTSMixer post_init restriction and Qwen video input regression affect different models and different code paths." + }, + { + "left": "issue:39692", + "right": "issue:44206", + "accept": false, + "reason": "SigLIP2 docs example errors are about example misuse and quantization; LasrFeatureExtractor center-arg regression is an API compatibility bug in another component." 
+ }, + { + "left": "issue:43381", + "right": "issue:43873", + "accept": false, + "reason": "Gradient checkpointing in eval mode and offloading with quantization are distinct runtime behaviors with no clear shared fix." + }, + { + "left": "issue:43873", + "right": "issue:44871", + "accept": false, + "reason": "Quantization/offloading behavior is unrelated to Gemma-3 eos_token_id configuration inconsistency." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 76, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:33290", + "issue:34689", + "issue:35141", + "issue:42175", + "issue:43316", + "issue:43540", + "issue:43643", + "issue:43749", + "issue:43931", + "issue:44162", + "issue:44190", + "issue:44496", + "issue:44514", + "issue:44792", + "issue:44855", + "issue:44877", + "issue:44913", + "issue:45137" + ], + "soft_pairs": [ + "issue:44190|issue:44496", + "issue:33290|issue:35141", + "issue:44162|issue:44792", + "issue:33290|issue:34689", + "issue:44855|issue:45137", + "issue:43540|issue:44514", + "issue:43749|issue:43931", + "issue:43643|issue:44913", + "issue:43316|issue:44877", + "issue:42175|issue:43316" + ], + "prepared_review_unit_hash": "900526a1df8bc75b16537aeeab085c35fb1365cbbf259dd18674f403c3c53e3e", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13413, + "estimated_input_tokens": 3354, + "estimated_eval_tokens": 6964 + }, + 
"overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is not a true duplicate set: the issues cover unrelated bugs in training, model loading, config serialization, processor behavior, and environment/backend setup. All soft-similarity pairs should be rejected.", + "confidence": 0.96, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44190", + "right": "issue:44496", + "accept": false, + "reason": "Different failures: local dataset loading in an example script vs unrecognized model/config loading for OLMo." + }, + { + "left": "issue:33290", + "right": "issue:35141", + "accept": false, + "reason": "Different code paths: DeepSpeed Adafactor OOM vs embedding resize/output reinitialization after post_init." + }, + { + "left": "issue:44162", + "right": "issue:44792", + "accept": false, + "reason": "Different models and symptoms: ESM2 regression vs a janus test failure for image generation." + }, + { + "left": "issue:33290", + "right": "issue:34689", + "accept": false, + "reason": "Both are model/training-related, but one is an optimizer OOM and the other is a Llama 3.2 Vision model-loading break; not the same bug." + }, + { + "left": "issue:44855", + "right": "issue:45137", + "accept": false, + "reason": "Completely different problems: Python 3.13 import parsing error vs DeepSpeed ZeRO3 deque underflow." + }, + { + "left": "issue:43540", + "right": "issue:44514", + "accept": false, + "reason": "Both involve Qwen VL/video handling, but they fail in different APIs and model stacks; too different to merge." + }, + { + "left": "issue:43749", + "right": "issue:43931", + "accept": false, + "reason": "Different underlying issues: FSDP CPU RAM efficient loading regression vs weight-shape mismatch for Qwen3-VL-30B-A3B-Instruct." 
+ }, + { + "left": "issue:43643", + "right": "issue:44913", + "accept": false, + "reason": "Different config-loading bugs: missing fields with trust_remote_code vs GPTNeoX rotary_pct not persisting on reload." + }, + { + "left": "issue:43316", + "right": "issue:44877", + "accept": false, + "reason": "Different config discrepancies for different models; not the same defect." + }, + { + "left": "issue:42175", + "right": "issue:43316", + "accept": false, + "reason": "Backend packaging issue for TensorFlow installation vs Gemma3TextConfig API discrepancy; unrelated." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 77, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:33290", + "issue:34689", + "issue:36010", + "issue:36331", + "issue:41950", + "issue:43065", + "issue:43316", + "issue:43381", + "issue:43388", + "issue:43404", + "issue:43441", + "issue:43475", + "issue:43688", + "issue:43716", + "issue:43856", + "issue:44617", + "issue:45127", + "issue:45200" + ], + "soft_pairs": [ + "issue:43381|issue:43856", + "issue:43388|issue:43688", + "issue:33290|issue:36331", + "issue:43316|issue:43716", + "issue:43404|issue:43441", + "issue:34689|issue:36010", + "issue:41950|issue:43475", + "issue:45127|issue:45200", + "issue:43065|issue:44617" + ], + "prepared_review_unit_hash": "a57584566082ad85dd61f02b9b909eabdd137a8114795b5ac2483dd88110f224", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 18, + "item_count": 
18, + "soft_pair_count": 9, + "serialized_chars": 13062, + "estimated_input_tokens": 3266, + "estimated_eval_tokens": 6788 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is heterogeneous: the items describe unrelated bugs across training, loading, model architecture, and pipeline behavior. The soft pairs only show superficial similarity (e.g. shared memory/error wording), not the same underlying defect, so none should be merged as duplicates.", + "confidence": 0.98, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43381", + "right": "issue:43856", + "accept": false, + "reason": "Both mention memory/efficiency, but one is about gradient checkpointing being disallowed in eval mode while the other is MoE training memory usage; different code paths and fixes." + }, + { + "left": "issue:43388", + "right": "issue:43688", + "accept": false, + "reason": "Both involve loss/metrics behavior in training, but one is a gather_for_metrics label-truncation bug and the other is auxiliary-loss normalization; unrelated symptoms and components." + }, + { + "left": "issue:33290", + "right": "issue:36331", + "accept": false, + "reason": "AdaFactor OOM in DeepSpeed and CustomTrainer.compute_loss signature breakage are separate trainer/optimizer issues with no shared failure mode." + }, + { + "left": "issue:43316", + "right": "issue:43716", + "accept": false, + "reason": "Gemma3TextConfig API mismatch and Mistral-3 image preprocessor dtype mismatch are model-specific configuration vs preprocessing bugs, not the same defect." 
+ }, + { + "left": "issue:43404", + "right": "issue:43441", + "accept": false, + "reason": "Tied lm_head weights in Mistral3ForConditionalGeneration and FlashAttention failures in Ministral-3 affect different model internals and execution paths." + }, + { + "left": "issue:34689", + "right": "issue:36010", + "accept": false, + "reason": "Model loading breakage for Llama 3.2 Vision and GenerationMixin import errors are distinct regressions in different subsystems." + }, + { + "left": "issue:41950", + "right": "issue:43475", + "accept": false, + "reason": "A video-classification pipeline choosing image processors and a Sam3Video missing fpn_position_embeddings are different SAM/video model bugs." + }, + { + "left": "issue:45127", + "right": "issue:45200", + "accept": false, + "reason": "LoRA merge collapse with extended vocabulary and Gemma 4 mm_token_type_ids defaults are unrelated model-behavior issues." + }, + { + "left": "issue:43065", + "right": "issue:44617", + "accept": false, + "reason": "Dummy Conv2d in Sam3PixelDecoder and Sam3Video CUDA OOM are not the same underlying problem; one is an implementation detail, the other is a runtime memory issue." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 78, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:16998", + "issue:22355", + "issue:25251", + "issue:28282", + "issue:43295", + "issue:43452", + "issue:43646", + "issue:43761", + "issue:43825", + "issue:43906", + "issue:44077", + "issue:44242", + "issue:44479", + "issue:44496", + "issue:44534", + "issue:44871", + "issue:44913", + "issue:45290" + ], + "soft_pairs": [ + "issue:16998|issue:22355", + "issue:43761|issue:44479", + "issue:25251|issue:28282", + "issue:43295|issue:43825", + "issue:43295|issue:44871", + "issue:43452|issue:44496", + "issue:44077|issue:44534", + "issue:43761|issue:44242", + "issue:43646|issue:44913", + "issue:43906|issue:45290" + ], + "prepared_review_unit_hash": "6e47538e6299020135f41f84eb2fac2483ea503910e168bb43ed106e15a284fb", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13506, + "estimated_input_tokens": 3377, + "estimated_eval_tokens": 7010 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These issues are heterogeneous and do not form a duplicate cluster: they span unrelated failures in model loading, pipelines, configs, chat templating, and version-specific regressions.", + "confidence": 0.98, + "canonical_issue_reason": "No single canonical issue 
fits this set; the items describe distinct bugs and questions rather than one underlying problem.", + "canonical_pr_reason": null, + "best_issue_reason": "No issue is clearly the global representative, since the cluster is not a duplicate set and the topics do not converge on one code path.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:16998", + "right": "issue:22355", + "accept": false, + "reason": "Different problems: DeBERTa model_max_length behavior vs missing transformers.onnx module." + }, + { + "left": "issue:43761", + "right": "issue:44479", + "accept": false, + "reason": "Both are regressions, but one is CLIP hidden_states output and the other is a Qwen video-input failure; different code paths." + }, + { + "left": "issue:25251", + "right": "issue:28282", + "accept": false, + "reason": "Pipeline top_k output-shape change is unrelated to AutoModel/PyTorch import errors." + }, + { + "left": "issue:43295", + "right": "issue:43825", + "accept": false, + "reason": "One is processor/tokenizer image handling in v4.57.5; the other is a pipeline error-message regression about translation tasks." + }, + { + "left": "issue:43295", + "right": "issue:44871", + "accept": false, + "reason": "Custom processor/tokenizer regression and Gemma-3 eos_token_id config mismatch are distinct bugs." + }, + { + "left": "issue:43452", + "right": "issue:44496", + "accept": false, + "reason": "Both involve model loading, but gguf_file breakage and an unrecognized-model/config.json issue are different failure modes." + }, + { + "left": "issue:44077", + "right": "issue:44534", + "accept": false, + "reason": "Optional post_init validation for patchtsmixer is unrelated to non-persistent buffer initialization corruption." + }, + { + "left": "issue:43761", + "right": "issue:44242", + "accept": false, + "reason": "CLIPVisionModel hidden_states regression is unrelated to MoE load-balancing loss computation." 
+ }, + { + "left": "issue:43646", + "right": "issue:44913", + "accept": false, + "reason": "Custom model initialization breakage in v5 is not the same as GPTNeoXConfig rotary_pct not persisting on reload." + }, + { + "left": "issue:43906", + "right": "issue:45290", + "accept": false, + "reason": "An isolated reproduction of a prior issue is not the same bug as chat template crashes on tool-call assistant messages." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 79, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:41720", + "issue:42617", + "issue:43065", + "issue:43575", + "issue:43742", + "issue:43931", + "issue:44060", + "issue:44479", + "issue:44521", + "issue:44610", + "issue:44734", + "issue:44792", + "issue:44933", + "issue:44977", + "issue:45245", + "issue:45362", + "issue:45381" + ], + "soft_pairs": [ + "issue:43931|issue:44060", + "issue:44521|issue:45381", + "issue:44933|issue:45362", + "issue:44479|issue:44734", + "issue:42617|issue:43575", + "issue:43575|issue:43742", + "issue:43065|issue:44610", + "issue:44792|issue:44977", + "issue:41720|issue:45245" + ], + "prepared_review_unit_hash": "c4ee7f9d59d14f5883b1ca6fef48f4af284755c08c4c35f993b6bddb0c8cb399", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12350, + "estimated_input_tokens": 3088, + "estimated_eval_tokens": 6432 + }, + "overflow_policy": "truncate_then_skip", 
+ "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is heterogeneous: the items cover unrelated model-loading, multimodal preprocessing, serving, and tensor-shape bugs. None of the soft pairs look like the same underlying defect, so I would not merge any of them as duplicates.", + "confidence": 0.95, + "canonical_issue_reason": "No clear canonical issue: the reports span different models and code paths, so there is no single issue that cleanly represents the cluster.", + "canonical_pr_reason": "No pull requests are present in the cluster.", + "best_issue_reason": "If one must be chosen, issue 44521 is the most broadly phrased and has the strongest external references, but it still does not represent the rest of the items well.", + "best_pr_reason": "No pull requests are present in the cluster.", + "soft_edge_verdicts": [ + { + "left": "issue:43931", + "right": "issue:44060", + "accept": false, + "reason": "Different models and failure modes: one is a Qwen3-VL weight-shape mismatch, the other is a Qwen3-Next tied-weights warning issue." + }, + { + "left": "issue:44521", + "right": "issue:45381", + "accept": false, + "reason": "Both involve multimodal/video handling, but the bugs are different: all-zero assistant masks vs incorrect vision_position_ids in video input." + }, + { + "left": "issue:44933", + "right": "issue:45362", + "accept": false, + "reason": "Unrelated symptoms and targets: a missing import from image_utils versus Qwen3.5 chat runtime crashes." + }, + { + "left": "issue:44479", + "right": "issue:44734", + "accept": false, + "reason": "Different subsystems and root causes: video-input regression across Qwen VL variants versus KV-cache continuation indexing in transformers serve." 
+ }, + { + "left": "issue:42617", + "right": "issue:43575", + "accept": false, + "reason": "Both mention loading/parallelism, but one is a script failure and the other is an OOM while loading a specific model with tensor parallelism." + }, + { + "left": "issue:43575", + "right": "issue:43742", + "accept": false, + "reason": "Different model families and errors: tensor-parallel OOM for Qwen2-57B versus a KeyError loading MobileLLM-125M." + }, + { + "left": "issue:43065", + "right": "issue:44610", + "accept": false, + "reason": "Both are vision-model related, but one is a dummy layer choice in Sam3PixelDecoder and the other is a processor/model image-size mismatch in OmDet-Turbo." + }, + { + "left": "issue:44792", + "right": "issue:44977", + "accept": false, + "reason": "No shared underlying bug: a Janus image-generation test failure versus a Qwen3.5 flash-attention generation issue." + }, + { + "left": "issue:41720", + "right": "issue:45245", + "accept": false, + "reason": "Completely different errors: cudaErrorAssert on A800 for Qwen3 auto device mapping versus a category-cardinality RuntimeError." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 80, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:32090", + "issue:35141", + "issue:41720", + "issue:43388", + "issue:43575", + "issue:43582", + "issue:43650", + "issue:43827", + "issue:43994", + "issue:44190", + "issue:44220", + "issue:44464", + "issue:44534", + "issue:44871", + "issue:44913", + "issue:44964", + "issue:45127", + "issue:45357" + ], + "soft_pairs": [ + "issue:44464|issue:44871", + "issue:41720|issue:43575", + "issue:32090|issue:35141", + "issue:43827|issue:43994", + "issue:44534|issue:44913", + "issue:43388|issue:43582", + "issue:43650|issue:44220", + "issue:44190|issue:44964", + "issue:45127|issue:45357" + ], + "prepared_review_unit_hash": "d99b1fa535ef875d14d81ca25b3665d1300d090564e306e42b7a4031b70bda60", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 9, + "serialized_chars": 13228, + "estimated_input_tokens": 3307, + "estimated_eval_tokens": 6870 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is heterogeneous: it contains unrelated bugs spanning training, model loading, generation, config reload, docs, and Apple Silicon runtime errors. 
There is no strong duplicate theme across the items.", + "confidence": 0.96, + "canonical_issue_reason": "Issue 35141 is the most concrete and well-scoped bug report: it describes a specific reproducible regression in `post_init` when `tie_word_embedding=False` after resizing embeddings.", + "canonical_pr_reason": null, + "best_issue_reason": "35141 is the best representative issue because it has a clear root-cause area, specific triggering condition, and a plausible fix surface. The other issues are either more generic, less actionable, or clearly unrelated to one another.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44464", + "right": "issue:44871", + "accept": false, + "reason": "Different problems: chunked generation/compiled forward inconsistency vs. EOS token configuration mismatch. Shared generation context is too broad to treat as the same bug." + }, + { + "left": "issue:41720", + "right": "issue:43575", + "accept": false, + "reason": "Both involve large Qwen model loading, but one is a device-map cuda assert and the other is tensor-parallel OOM. Different failure modes and likely different causes." + }, + { + "left": "issue:32090", + "right": "issue:35141", + "accept": false, + "reason": "Trainer `_gpu_broadcast_one` TypeError with `NoneType` is unrelated to embedding resizing and `post_init` reinitialization behavior." + }, + { + "left": "issue:43827", + "right": "issue:43994", + "accept": false, + "reason": "Docs referencing removed `pipeline()` APIs is a documentation cleanup issue; the Siglip2 issue is a model/pipeline output correctness bug. Not the same underlying problem." + }, + { + "left": "issue:44534", + "right": "issue:44913", + "accept": false, + "reason": "Non-persistent buffer initialization junk and GPTNeoX rotary_pct reload regression are both config/state issues, but they affect different mechanisms and code paths." 
+ }, + { + "left": "issue:43388", + "right": "issue:43582", + "accept": false, + "reason": "`gather_for_metrics` label truncation and Apple Silicon allocator warmup `TypeError` are unrelated subsystems with different symptoms." + }, + { + "left": "issue:43650", + "right": "issue:44220", + "accept": false, + "reason": "A vague request ('ADD THE DATA') is not the same underlying issue as `_torch_extract_fbank_features()` failing." + }, + { + "left": "issue:44190", + "right": "issue:44964", + "accept": false, + "reason": "Both concern loading examples/models, but one is a local dataset issue in an example script and the other is a specific Phi-4 multimodal model load regression." + }, + { + "left": "issue:45127", + "right": "issue:45357", + "accept": false, + "reason": "LoRA merging with extended vocab causing collapse is a different bug from incorrect visual encoder keys in `save_pretrained`; they affect different model components and save paths." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 81, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:36246", + "issue:41628", + "issue:43116", + "issue:43335", + "issue:43540", + "issue:43644", + "issue:43761", + "issue:43976", + "issue:44077", + "issue:44079", + "issue:44190", + "issue:44355", + "issue:44496", + "issue:44734", + "issue:44743", + "issue:44855", + "issue:44913", + "issue:45372" + ], + "soft_pairs": [ + "issue:43761|issue:44743", + "issue:44355|issue:44855", + "issue:43335|issue:44855", + "issue:43976|issue:44855", + "issue:43540|issue:44734", + "issue:44496|issue:45372", + "issue:43644|issue:44913", + 
"issue:44077|issue:44079", + "issue:43116|issue:44190", + "issue:36246|issue:41628" + ], + "prepared_review_unit_hash": "e70fadd55d3b0bf6ffe246f773d2fc2c85ab2bdc366e20e120f8df8a4e1ca5ec", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13692, + "estimated_input_tokens": 3423, + "estimated_eval_tokens": 7102 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is not a duplicate set; it groups a variety of unrelated Transformers bugs and regressions. None of the soft-similarity pairs look like the same underlying issue, so all soft edges should be rejected.", + "confidence": 0.97, + "canonical_issue_reason": "No true canonical duplicate stands out: the issues span different models, scripts, regressions, and import/config failures rather than one shared bug.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:45372 is the best representative issue because it is recent, open, and has a clearly scoped regression with downstream impact, even though it is not a duplicate hub.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43761", + "right": "issue:44743", + "accept": false, + "reason": "Different code paths and models: CLIPVisionModel hidden_states regression vs Qwen3OmniMoe recurrent-state reset with cache." + }, + { + "left": "issue:44355", + "right": "issue:44855", + "accept": false, + "reason": "Both are import-related, but one is a generic compiled-Python failure and the other is a Python 3.13 torch.jit parsing/IndentationError in DeBERTaV2." 
+ }, + { + "left": "issue:43335", + "right": "issue:44855", + "accept": false, + "reason": "Unrelated bugs: SwitchTransformers sparse-layer config handling vs DeBERTaV2 import failure on Python 3.13." + }, + { + "left": "issue:43976", + "right": "issue:44855", + "accept": false, + "reason": "A broad Python-version compatibility regression is not the same concrete bug as the DeBERTaV2 import parser issue." + }, + { + "left": "issue:43540", + "right": "issue:44734", + "accept": false, + "reason": "Qwen3OmniMoe video-input validation and transformers serve KV-cache tensor indexing are different failures." + }, + { + "left": "issue:44496", + "right": "issue:45372", + "accept": false, + "reason": "Model config resolution failure and mistral_common ReasoningEffort import breakage are distinct causes and fixes." + }, + { + "left": "issue:43644", + "right": "issue:44913", + "accept": false, + "reason": "Global non-persistent buffer initialization bug vs GPTNeoXConfig rotary_pct persistence bug; not the same issue." + }, + { + "left": "issue:44077", + "right": "issue:44079", + "accept": false, + "reason": "Different subsystems: patchtsmixer post_init validation vs ModelOutput key assignment logic." + }, + { + "left": "issue:43116", + "right": "issue:44190", + "accept": false, + "reason": "Different example scripts and failures: multi-label classification output vs local dataset loading in image classification." + }, + { + "left": "issue:36246", + "right": "issue:41628", + "accept": false, + "reason": "Both are import errors, but they concern different symbols and failure modes; not the same concrete missing-export bug." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 82, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:36246", + "issue:38175", + "issue:38617", + "issue:39692", + "issue:41950", + "issue:42175", + "issue:43065", + "issue:43295", + "issue:43335", + "issue:43404", + "issue:44016", + "issue:44190", + "issue:44279", + "issue:44871", + "issue:45216", + "issue:45245", + "issue:45276" + ], + "soft_pairs": [ + "issue:41950|issue:44016", + "issue:43295|issue:45276", + "issue:43404|issue:45216", + "issue:36246|issue:38617", + "issue:42175|issue:44279", + "issue:39692|issue:44190", + "issue:43335|issue:44190", + "issue:38175|issue:43065", + "issue:44871|issue:45245" + ], + "prepared_review_unit_hash": "217235df961f0f03f43911282b18ea687473b4426c673c344a2d6f8f28460df9", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12735, + "estimated_input_tokens": 3184, + "estimated_eval_tokens": 6624 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These items are not a duplicate cluster: they describe unrelated bugs across different models, pipelines, notebooks, configs, and dependency/setup issues. 
All soft-edge pairs should be rejected.", + "confidence": 0.98, + "canonical_issue_reason": "No single canonical issue stands out because the items are heterogeneous and do not share the same underlying defect.", + "canonical_pr_reason": null, + "best_issue_reason": "No issue is a good global canonical candidate; the set spans unrelated reports rather than one bug resurfacing in multiple places.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:41950", + "right": "issue:44016", + "accept": false, + "reason": "Video-classification pipeline processor lookup is unrelated to a notebook syntax error." + }, + { + "left": "issue:43295", + "right": "issue:45276", + "accept": false, + "reason": "Different model-specific regressions: processor/tokenizer behavior vs Gemma4 embedding resizing." + }, + { + "left": "issue:43404", + "right": "issue:45216", + "accept": false, + "reason": "Different concrete bugs affecting different models and code paths: tied lm_head weights vs save_pretrained checkpoint content." + }, + { + "left": "issue:36246", + "right": "issue:38617", + "accept": false, + "reason": "Separate import errors for different symbols in different modules." + }, + { + "left": "issue:42175", + "right": "issue:44279", + "accept": false, + "reason": "One is about pip extras/backends, the other is a vague dependency complaint without the same specific failure." + }, + { + "left": "issue:39692", + "right": "issue:44190", + "accept": false, + "reason": "SigLIP2 documentation/example issues are not the same as a local dataset loading failure in a training script." + }, + { + "left": "issue:43335", + "right": "issue:44190", + "accept": false, + "reason": "SwitchTransformers sparse-layer config bug is unrelated to dataset loading in image-classification no-trainer." 
+ }, + { + "left": "issue:38175", + "right": "issue:43065", + "accept": false, + "reason": "Unexpected probabilities in SigLIP2 and a dummy Conv2d in Sam3PixelDecoder are unrelated model bugs." + }, + { + "left": "issue:44871", + "right": "issue:45245", + "accept": false, + "reason": "Gemma-3 eos token configuration mismatch is unrelated to the categories limit runtime error." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 83, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:42907", + "issue:43299", + "issue:43441", + "issue:43479", + "issue:43854", + "issue:44016", + "issue:44112", + "issue:44186", + "issue:44220", + "issue:44246", + "issue:44466", + "issue:44734", + "issue:44792", + "issue:44913", + "issue:45003", + "issue:45216", + "issue:45375" + ], + "soft_pairs": [ + "issue:44186|issue:44734", + "issue:43479|issue:45375", + "issue:44246|issue:45003", + "issue:44016|issue:44220", + "issue:44466|issue:44913", + "issue:44112|issue:44792", + "issue:42907|issue:43441", + "issue:43854|issue:44792", + "issue:43299|issue:45216" + ], + "prepared_review_unit_hash": "fef536c76b81b46eff37c0beb95475c6f0815cbb777ac02b9f5735b69f6791fe", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12507, + "estimated_input_tokens": 3127, + "estimated_eval_tokens": 6510 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + 
"aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The candidates are mostly superficial matches by subsystem or bug shape, but they describe different models, code paths, or failure modes. I would not merge any of the soft pairs.", + "confidence": 0.94, + "canonical_issue_reason": "Issue 45216 is the clearest concrete regression report: it names the model family, the failing API (`save_pretrained`), and a specific version threshold, making it the most representative issue here.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 45216 is the strongest standalone issue to anchor on because it is precise, reproducible, and actionable; the other items are either narrower, more test-specific, or describe unrelated model/config paths.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44186", + "right": "issue:44734", + "accept": false, + "reason": "Both are Transformers bugs, but one is a tokenizer/NER padding crash and the other is a serve/KV-cache indexing crash; different components and failure paths." + }, + { + "left": "issue:43479", + "right": "issue:45375", + "accept": false, + "reason": "Both involve config handling, but they affect different model families and different fields (`None` defaults vs a missing `deepstack_visual_indexes` field); not the same underlying bug." + }, + { + "left": "issue:44246", + "right": "issue:45003", + "accept": false, + "reason": "Import slowness and unsafe `sys.modules` access are related only loosely; the reports do not describe the same concrete defect or fix." + }, + { + "left": "issue:44016", + "right": "issue:44220", + "accept": false, + "reason": "A notebook syntax error and an audio feature extraction issue are unrelated." 
+ }, + { + "left": "issue:44466", + "right": "issue:44913", + "accept": false, + "reason": "Both concern serialization/reload behavior, but one is about `lm_head.weight` tied-weight saving and the other about `rotary_pct` config persistence; different bugs." + }, + { + "left": "issue:44112", + "right": "issue:44792", + "accept": false, + "reason": "Both are failing tests, but they target different models and different assertions; this is not the same bug." + }, + { + "left": "issue:42907", + "right": "issue:43441", + "accept": false, + "reason": "Both mention Ministral/Devstral, but one is about saving dequantized weights and the other about FlashAttention loading; separate code paths." + }, + { + "left": "issue:43854", + "right": "issue:44792", + "accept": false, + "reason": "Different model families and different failing scenarios; only shared symptom is a test failure." + }, + { + "left": "issue:43299", + "right": "issue:45216", + "accept": false, + "reason": "Both are Qwen-related version regressions, but one breaks model loading while the other corrupts saved checkpoints; not the same underlying issue." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 84, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:43295", + "issue:43299", + "issue:43643", + "issue:43673", + "issue:43824", + "issue:43976", + "issue:44016", + "issue:44062", + "issue:44112", + "issue:44279", + "issue:44336", + "issue:44496", + "issue:44933", + "issue:44964", + "issue:44987", + "issue:45335", + "issue:45357" + ], + "soft_pairs": [ + "issue:43295|issue:45335", + "issue:43673|issue:44336", + "issue:44016|issue:44062", + "issue:43643|issue:44496", + "issue:43824|issue:44933", + "issue:43976|issue:44987", + "issue:44112|issue:44964", + "issue:43976|issue:44279", + "issue:43299|issue:45357" + ], + "prepared_review_unit_hash": "df8183d71849d0360f44618ccd936fd50cbdf04d6ef07a361283a4a529c9f20c", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12576, + "estimated_input_tokens": 3144, + "estimated_eval_tokens": 6544 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "Mixed cluster of transformer regression reports; the pairs are broadly related by theme but do not look like the same underlying bug, so no soft edges are accepted.", + "confidence": 0.92, + "canonical_issue_reason": "No clear canonical issue: this set spans unrelated regressions (model loading, config parsing, 
imports, CI, docs, and environment compatibility) rather than one duplicate bug.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:44964 is the most concrete, user-facing latest-transformers model-loading regression and the best representative issue in this mixed cluster.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43295", + "right": "issue:45335", + "accept": false, + "reason": "Different failures: processor/tokenizer multimodal regression vs resize_token_embeddings not propagating to decoder.embed_tokens." + }, + { + "left": "issue:43673", + "right": "issue:44336", + "accept": false, + "reason": "Unrelated code paths: chunked_prefill cache handling vs ANSI color output in loading_report." + }, + { + "left": "issue:44016", + "right": "issue:44062", + "accept": false, + "reason": "Notebook syntax error and AddedToken keyword-arg collision are distinct bugs." + }, + { + "left": "issue:43643", + "right": "issue:44496", + "accept": false, + "reason": "Both involve loading/config, but one is trust_remote_code field loss and the other is missing model_type for an unrecognized model." + }, + { + "left": "issue:43824", + "right": "issue:44933", + "accept": false, + "reason": "Import failures in different areas: missing Qwen2_5_VL class export vs a non-existent image_utils import." + }, + { + "left": "issue:43976", + "right": "issue:44987", + "accept": false, + "reason": "Python-version compatibility issue vs a model-loading failure; not the same bug." + }, + { + "left": "issue:44112", + "right": "issue:44964", + "accept": false, + "reason": "CI stale-test failure is unrelated to the Phi-4 multimodal loading regression." + }, + { + "left": "issue:43976", + "right": "issue:44279", + "accept": false, + "reason": "General dependency complaint and Python 3.9/3.10 compatibility problem are not the same underlying issue." 
+ }, + { + "left": "issue:43299", + "right": "issue:45357", + "accept": false, + "reason": "Both are Qwen multimodal reports, but one is model loading and the other is save_pretrained key serialization; different code paths and fixes." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 85, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:43526", + "issue:43577", + "issue:43901", + "issue:44075", + "issue:44242", + "issue:44297", + "issue:44351", + "issue:44368", + "issue:44462", + "issue:44496", + "issue:44521", + "issue:45042", + "issue:45081", + "issue:45200", + "issue:45216", + "issue:45278", + "issue:45325" + ], + "soft_pairs": [ + "issue:44075|issue:44368", + "issue:43526|issue:43901", + "issue:44496|issue:45042", + "issue:44462|issue:45081", + "issue:44521|issue:45325", + "issue:44242|issue:45200", + "issue:43901|issue:44496", + "issue:43577|issue:45216", + "issue:44351|issue:45278", + "issue:44297|issue:44462" + ], + "prepared_review_unit_hash": "169bb639b78d78cd30b86cc5ece9d5b6c326d8b26efe6f08d387d1b8b20857c1", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 10, + "serialized_chars": 13098, + "estimated_input_tokens": 3275, + "estimated_eval_tokens": 6806 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These items are a collection of unrelated 
Hugging Face Transformers bug reports and one docs issue; none of the soft pairs look like the same underlying defect, so all candidate merges should be rejected. There is no clear canonical issue or PR for the set as a whole.", + "confidence": 0.93, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44075", + "right": "issue:44368", + "accept": false, + "reason": "Different problems: SGD optimizer arguments being ignored vs a Qwen tie_word_embeddings warning during LoRA fine-tuning. Shared training context only, not the same bug." + }, + { + "left": "issue:43526", + "right": "issue:43901", + "accept": false, + "reason": "BeitImageProcessorFast label reduction bug vs TextClassificationPipeline documentation mismatch. One is a runtime processing bug, the other is a docs/behavior note." + }, + { + "left": "issue:44496", + "right": "issue:45042", + "accept": false, + "reason": "Unrecognized model config import error vs PIL backend image processors requiring torchvision. Both are loading-related but clearly distinct failure modes." + }, + { + "left": "issue:44462", + "right": "issue:45081", + "accept": false, + "reason": "AutoTokenizer ignoring tokenizer.json vs a Mistral regex patch crash on backend_tokenizer access. Both involve tokenizers, but not the same code path or defect." + }, + { + "left": "issue:44521", + "right": "issue:45325", + "accept": false, + "reason": "All-zero assistant masks in multimodal chat templating vs Qwen2.5-VL rope index temporal position scaling. Different multimodal internals and different outputs affected." + }, + { + "left": "issue:44242", + "right": "issue:45200", + "accept": false, + "reason": "Missing load balancing loss when output_router_logits=False vs mm_token_type_ids required for text-only fine-tuning. Different model features and different broken behavior." 
+ }, + { + "left": "issue:43901", + "right": "issue:44496", + "accept": false, + "reason": "Docs mentioning return_all_scores vs an unrecognized model/config loading error. No shared bug or fix path." + }, + { + "left": "issue:43577", + "right": "issue:45216", + "accept": false, + "reason": "BLIP2 dtype staying float32 when loading vs Qwen3.5 save_pretrained regression. Completely separate models and code paths." + }, + { + "left": "issue:44351", + "right": "issue:45278", + "accept": false, + "reason": "Specific HybridCache import failure vs broad import errors after upgrading 4.57.0 to 5.5.0. The symptoms are too different to be the same issue." + }, + { + "left": "issue:44297", + "right": "issue:44462", + "accept": false, + "reason": "Wrong tokenizer_class saved in tokenizer_config.json vs AutoTokenizer ignoring tokenizer.json. Both touch tokenizer persistence, but one is save-side metadata and the other is load-side selection logic." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 86, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:38175", + "issue:41950", + "issue:43479", + "issue:43525", + "issue:43650", + "issue:43854", + "issue:43976", + "issue:44279", + "issue:44462", + "issue:44479", + "issue:44496", + "issue:44617", + "issue:44779", + "issue:44877", + "issue:44964", + "issue:45020", + "issue:45245", + "issue:45405" + ], + "soft_pairs": [ + "issue:43854|issue:44964", + "issue:44279|issue:45405", + "issue:44462|issue:44779", + "issue:44479|issue:45245", + "issue:43976|issue:45405", + "issue:41950|issue:43650", + "issue:41950|issue:45020", + "issue:43479|issue:43525", + 
"issue:38175|issue:44617", + "issue:44496|issue:44877" + ], + "prepared_review_unit_hash": "ce2d6ec592648060b04cde78471f2bd0903804e745c561832b30537b083f2874", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13214, + "estimated_input_tokens": 3304, + "estimated_eval_tokens": 6864 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is heterogeneous: it mixes unrelated model-loading errors, tokenizer regressions, config/default bugs, dependency/version issues, and video pipeline problems. None of the soft pairs look like the same underlying bug, so all soft edges should be rejected.", + "confidence": 0.96, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43854", + "right": "issue:44964", + "accept": false, + "reason": "Both are model-loading failures, but they affect different models and likely different code paths/symptoms." + }, + { + "left": "issue:44279", + "right": "issue:45405", + "accept": false, + "reason": "These are separate dependency complaints: a generic transformers dependency issue vs an unreleased MIN_PEFT_VERSION bump." + }, + { + "left": "issue:44462", + "right": "issue:44779", + "accept": false, + "reason": "Both involve tokenizers, but one is about ignoring tokenizer.json while the other is a DeepSeek tokenization regression." + }, + { + "left": "issue:44479", + "right": "issue:45245", + "accept": false, + "reason": "Completely different failures: video-input regression in Qwen VL models vs a category-count runtime limit error." 
+ }, + { + "left": "issue:43976", + "right": "issue:45405", + "accept": false, + "reason": "Different version-compatibility problems affecting different dependencies (Python support vs PEFT version)." + }, + { + "left": "issue:41950", + "right": "issue:43650", + "accept": false, + "reason": "Video-classification processor lookup and a vague data request are not the same bug." + }, + { + "left": "issue:41950", + "right": "issue:45020", + "accept": false, + "reason": "The first is a video-classification processor lookup bug; the second is broad remote_code breakage, not the same concrete issue." + }, + { + "left": "issue:43479", + "right": "issue:43525", + "accept": false, + "reason": "Both mention configs, but one is bad default vision/audio init and the other is a missing pad_token_id attribute." + }, + { + "left": "issue:38175", + "right": "issue:44617", + "accept": false, + "reason": "Unrelated symptoms: zero probabilities in siglip2 versus CUDA out-of-memory in Sam3Video." + }, + { + "left": "issue:44496", + "right": "issue:44877", + "accept": false, + "reason": "Both concern model/config loading, but they are different errors and not clearly the same underlying fix." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 87, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:39692", + "issue:42994", + "issue:43295", + "issue:43299", + "issue:43317", + "issue:43441", + "issue:43450", + "issue:43650", + "issue:43723", + "issue:43854", + "issue:43901", + "issue:44016", + "issue:44162", + "issue:44279", + "issue:44291", + "issue:44683", + "issue:44779", + "issue:45290" + ], + "soft_pairs": [ + "issue:43295|issue:44779", + "issue:43299|issue:43854", + "issue:43441|issue:44683", + "issue:39692|issue:43901", + "issue:43450|issue:44279", + "issue:43723|issue:44162", + "issue:43650|issue:45290", + "issue:42994|issue:43317", + "issue:44016|issue:44291" + ], + "prepared_review_unit_hash": "e708bb142e3d99e94e08510ff63b5d6d84b1a361426694d410b38dc4b72ca991", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 9, + "serialized_chars": 12995, + "estimated_input_tokens": 3249, + "estimated_eval_tokens": 6754 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These items are not a single duplicate cluster; they span unrelated bugs in tokenizers, model loading, attention kernels, docs, notebooks, and quantization/offload behavior. 
None of the soft pairs look like the same underlying issue or change.", + "confidence": 0.98, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": "If a single anchor is needed, issue 43295 is the clearest standalone report: it describes a concrete v4.57.5 regression, specific API breakage, and a focused reproduction path. But it is not a duplicate of the other issues in this set.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43295", + "right": "issue:44779", + "accept": false, + "reason": "Both mention tokenizer regressions in v5, but one is about processor.tokenizer / image-to-tokenizer behavior and the other is Deepseek tokenization correctness; different models, symptoms, and code paths." + }, + { + "left": "issue:43299", + "right": "issue:43854", + "accept": false, + "reason": "Both are model-loading failures, but Qwen3VL MoE loading and GLM-4.7-Flash unit-test loading are distinct models with no concrete shared failure mode." + }, + { + "left": "issue:43441", + "right": "issue:44683", + "accept": false, + "reason": "FlashAttention is the only overlap. One is a Ministral-3 model-specific regression; the other is a torch>=2.9 compiled flex_attention failure. Different kernels and root causes." + }, + { + "left": "issue:39692", + "right": "issue:43901", + "accept": false, + "reason": "One is a SigLIP2 docs example with model/processor mismatch and quantization failure; the other is a TextClassificationPipeline docs mismatch about return_all_scores. Both are documentation issues but not the same bug." + }, + { + "left": "issue:43450", + "right": "issue:44279", + "accept": false, + "reason": "Video processor batched-shape bug versus a generic dependency issue with transformers; no shared code path or symptom." 
+ }, + { + "left": "issue:43723", + "right": "issue:44162", + "accept": false, + "reason": "Tokenizer loading via AutoTokenizer in v5 and ESM2 being broken are both tokenizer-related at a high level, but they refer to different models and different breakages." + }, + { + "left": "issue:43650", + "right": "issue:45290", + "accept": false, + "reason": "The title/content signal for 43650 is too generic to match the specific chat-template crash in 45290; no evidence of the same underlying bug." + }, + { + "left": "issue:42994", + "right": "issue:43317", + "accept": false, + "reason": "Quantized model saving failure and dequantized model loading with device_map=auto offload are related to quantization broadly, but they are separate operations with different failure points." + }, + { + "left": "issue:44016", + "right": "issue:44291", + "accept": false, + "reason": "A notebook syntax error and an init_empty_weights TypeError are unrelated failure types; no shared code path." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 88, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:43065", + "issue:43122", + "issue:43262", + "issue:43550", + "issue:43650", + "issue:43673", + "issue:43761", + "issue:44261", + "issue:44263", + "issue:44466", + "issue:44521", + "issue:44671", + "issue:44779", + "issue:44898", + "issue:44964", + "issue:45071", + "issue:45276", + "issue:45372" + ], + "soft_pairs": [ + "issue:43550|issue:44964", + "issue:43065|issue:44898", + "issue:44261|issue:44263", + "issue:43262|issue:44521", + "issue:44466|issue:45276", + "issue:44671|issue:44779", + 
"issue:43122|issue:43650", + "issue:45071|issue:45372", + "issue:43673|issue:43761" + ], + "prepared_review_unit_hash": "fd5b9e6dca681c40a59a2709fe6825d6729e47c5cae2a4e74a76e1030c47d2d2", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 9, + "serialized_chars": 13175, + "estimated_input_tokens": 3294, + "estimated_eval_tokens": 6844 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "No duplicate pair looks strong enough to merge: the items span unrelated bugs across tokenization, vision, audio, generation, config typing, and model-loading paths. All soft edges are rejected.", + "confidence": 0.97, + "canonical_issue_reason": "No single canonical duplicate emerges; the issues describe distinct failures rather than the same underlying bug.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue #44521 is the strongest standalone report: it has a specific multimodal `apply_chat_template()` failure, clear impact, and active discussion/inbound references.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43550", + "right": "issue:44964", + "accept": false, + "reason": "Different problems and code paths: torch.compile/SDPA failure in Bamba vs model-loading failure for Phi-4 multimodal." + }, + { + "left": "issue:43065", + "right": "issue:44898", + "accept": false, + "reason": "Both are vision-related but not the same bug: dummy Conv2d in Sam3PixelDecoder vs Perceiver non-default-resolution interpolation failure." 
+ }, + { + "left": "issue:44261", + "right": "issue:44263", + "accept": false, + "reason": "Distinct defects: missing `rms_norm_eps` in MLA layernorm precision handling vs `torch.split()` return values in GlmMoeDsaIndexer." + }, + { + "left": "issue:43262", + "right": "issue:44521", + "accept": false, + "reason": "Same API name, different failures: audio processor sampling-rate default mismatch vs multimodal assistant mask generation returning all zeros." + }, + { + "left": "issue:44466", + "right": "issue:45276", + "accept": false, + "reason": "Different underlying issues: `lm_head.weight` serialization/tied-weights behavior vs Gemma4 embedding-resize propagation across layers." + }, + { + "left": "issue:44671", + "right": "issue:44779", + "accept": false, + "reason": "Different models and symptoms: CamemBERT masked-LM prediction regression vs Deepseek tokenizer regression." + }, + { + "left": "issue:43122", + "right": "issue:43650", + "accept": false, + "reason": "Tokenizer regression vs unrelated placeholder/data request; no shared bug mechanism." + }, + { + "left": "issue:45071", + "right": "issue:45372", + "accept": false, + "reason": "Unrelated failures: `PretrainedConfig` type checking breakage vs Gemma 4 processor import error from `mistral_common` dependency." + }, + { + "left": "issue:43673", + "right": "issue:43761", + "accept": false, + "reason": "Different code paths and symptoms: generation cache missing during chunked prefill vs CLIPVisionModel hidden_states being None despite request." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 89, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:42175", + "issue:43295", + "issue:43352", + "issue:43475", + "issue:43525", + "issue:43824", + "issue:43881", + "issue:43901", + "issue:43976", + "issue:43994", + "issue:44016", + "issue:44246", + "issue:44617", + "issue:44623", + "issue:44749", + "issue:45250", + "issue:45397" + ], + "soft_pairs": [ + "issue:43352|issue:45250", + "issue:42175|issue:43976", + "issue:44623|issue:44749", + "issue:43901|issue:43994", + "issue:44623|issue:45397", + "issue:43295|issue:44617", + "issue:43824|issue:43976", + "issue:44246|issue:44617", + "issue:43475|issue:44016", + "issue:43525|issue:43881" + ], + "prepared_review_unit_hash": "19ca581ad7773de890a4148faa19c935d6a661dfd430d81432b757527403ccb5", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 10, + "serialized_chars": 12679, + "estimated_input_tokens": 3170, + "estimated_eval_tokens": 6596 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These are mostly unrelated issue reports spanning backend packaging, Flash Attention, multimodal processor regressions, model-specific load failures, docs, and performance. 
None of the soft pairs look like the same underlying bug or change, so they should all remain separate.", + "confidence": 0.92, + "canonical_issue_reason": "No single canonical issue fits this cluster: the items describe different products, model families, and failure modes rather than one duplicate thread.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:43824 is the strongest standalone issue report here: it has clear user impact, multiple references, and a specific import failure, but it is not a canonical duplicate target for the rest.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43352", + "right": "issue:45250", + "accept": false, + "reason": "Both mention Flash Attention 2.0, but one is a Nemotron model-specific unsupported-feature error and the other is a generic Flash Attention topic; not the same bug." + }, + { + "left": "issue:42175", + "right": "issue:43976", + "accept": false, + "reason": "TensorFlow backend packaging with torch extras is unrelated to a Python 3.9/3.10 compatibility/install support issue." + }, + { + "left": "issue:44623", + "right": "issue:44749", + "accept": false, + "reason": "Processor save_pretrained missing files is a serialization bug, while the Chinese issue is about inference/filtering slowing down after upgrade; different code paths." + }, + { + "left": "issue:43901", + "right": "issue:43994", + "accept": false, + "reason": "Docs mismatch for return_all_scores is not the same as SigLIP2 producing bad outputs with AutoModel/pipeline." + }, + { + "left": "issue:44623", + "right": "issue:45397", + "accept": false, + "reason": "Both involve processor/model saving or loading, but one is missing processor files and the other is a gemma-4 zero3 from_pretrained issue; distinct problems." + }, + { + "left": "issue:43295", + "right": "issue:44617", + "accept": false, + "reason": "A processor.tokenizer regression is unrelated to Sam3Video CUDA out-of-memory." 
+ }, + { + "left": "issue:43824", + "right": "issue:43976", + "accept": false, + "reason": "Missing Qwen2_5_VL import and Python version incompatibility are separate release/package problems." + }, + { + "left": "issue:44246", + "right": "issue:44617", + "accept": false, + "reason": "Slow import times are unrelated to a video model running out of memory." + }, + { + "left": "issue:43475", + "right": "issue:44016", + "accept": false, + "reason": "A missing Sam3VisionEncoderOutput attribute is a runtime model API bug, while the notebook syntax error is a docs/notebook issue." + }, + { + "left": "issue:43525", + "right": "issue:43881", + "accept": false, + "reason": "Llama4Config missing pad_token_id and glm-4v-9b loading failure are different model-specific attribute/loading errors." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 90, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:41628", + "issue:41950", + "issue:43901", + "issue:43976", + "issue:44188", + "issue:44220", + "issue:44246", + "issue:44297", + "issue:44336", + "issue:44361", + "issue:44373", + "issue:44462", + "issue:44589", + "issue:44749", + "issue:45356", + "issue:45362", + "issue:45397" + ], + "soft_pairs": [ + "issue:44589|issue:45362", + "issue:43901|issue:44246", + "issue:44188|issue:44336", + "issue:41628|issue:43976", + "issue:44462|issue:45356", + "issue:41950|issue:44220", + "issue:44297|issue:45397", + "issue:44361|issue:44749", + "issue:44336|issue:44373" + ], + "prepared_review_unit_hash": "5bb65fd47afc730aed473653770b2b739058a8552c5613ff782457137b9d477b", + "component_budget": { + "node_count": 349, 
+ "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12385, + "estimated_input_tokens": 3097, + "estimated_eval_tokens": 6450 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These items are a heterogeneous mix of unrelated bugs, regressions, docs issues, and performance reports. All proposed soft pairs look superficially similar by subsystem or wording, but they do not appear to describe the same underlying bug/change, so they should be rejected as duplicates.", + "confidence": 0.94, + "canonical_issue_reason": "No single canonical issue stands out; the set spans unrelated topics (tokenizers, pipelines, import/runtime errors, docs, and performance).", + "canonical_pr_reason": null, + "best_issue_reason": "No global best issue is suitable as an umbrella canonical because none of these reports plausibly subsumes the others.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44589", + "right": "issue:45362", + "accept": false, + "reason": "Both involve failures during model/tokenizer use, but one is a missing Float8 storage type and the other is a Qwen chat crash; different code paths and likely different root causes." + }, + { + "left": "issue:43901", + "right": "issue:44246", + "accept": false, + "reason": "Docs/behavior mismatch for TextClassificationPipeline is unrelated to intermittent slow import time." + }, + { + "left": "issue:44188", + "right": "issue:44336", + "accept": false, + "reason": "One is a torch.compile attention-kernel divergence bug; the other is terminal ANSI formatting in loading_report. Same library, different functionality." 
+ }, + { + "left": "issue:41628", + "right": "issue:43976", + "accept": false, + "reason": "Missing AutoImageProcessor import and Python version compatibility regression are not the same defect." + }, + { + "left": "issue:44462", + "right": "issue:45356", + "accept": false, + "reason": "Both are tokenizer-related, but one is AutoTokenizer ignoring tokenizer.json and the other is a Kimi-K2.5 codec/regression issue; not the same underlying bug." + }, + { + "left": "issue:41950", + "right": "issue:44220", + "accept": false, + "reason": "Video-classification pipeline image-processor lookup and _torch_extract_fbank_features() are unrelated pipeline vs audio feature-extraction problems." + }, + { + "left": "issue:44297", + "right": "issue:45397", + "accept": false, + "reason": "Tokenizer class mismatch in save_pretrained and Gemma-4 zero3 from_pretrained crashes are different failure modes with different likely fixes." + }, + { + "left": "issue:44361", + "right": "issue:44749", + "accept": false, + "reason": "MLukeTokenizer AttributeError and a performance slowdown after version upgrade are not the same bug." + }, + { + "left": "issue:44336", + "right": "issue:44373", + "accept": false, + "reason": "ANSI color output in loading_report and a wrong docstring for position_ids are clearly separate documentation/UX issues." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 91, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:36246", + "issue:42491", + "issue:43299", + "issue:43526", + "issue:43531", + "issue:43673", + "issue:43994", + "issue:44246", + "issue:44295", + "issue:44336", + "issue:44351", + "issue:44485", + "issue:44496", + "issue:44561", + "issue:45003", + "issue:45276", + "issue:45362", + "issue:45397" + ], + "soft_pairs": [ + "issue:42491|issue:43299", + "issue:44246|issue:44496", + "issue:36246|issue:44351", + "issue:43531|issue:45362", + "issue:44295|issue:45397", + "issue:43526|issue:45003", + "issue:43994|issue:45276", + "issue:44336|issue:44485", + "issue:43673|issue:44561" + ], + "prepared_review_unit_hash": "9bc9c74552ed2acff185a6401f4811e9604830c3b91f903afced120718f6c551", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 9, + "serialized_chars": 13106, + "estimated_input_tokens": 3277, + "estimated_eval_tokens": 6810 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The cluster is mostly heterogeneous: several unrelated model-loading, generation, import, and UI/reporting bugs are grouped together, but the soft-linked pairs do not look like true duplicates. 
Most similarities are only at the level of shared model families or v5 regression wording.", + "confidence": 0.82, + "canonical_issue_reason": "No clear canonical duplicate stands out because the items span different bugs and code paths; the closest shared theme is v5/model-loading breakage, but not a single underlying issue.", + "canonical_pr_reason": null, + "best_issue_reason": "If one issue must represent the cluster, 43299 is the strongest concrete example of the recurring v5 regression theme (Qwen3/Qwen3-MoE loading breakage) and is more specific than the broader or more generic reports.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:42491", + "right": "issue:43299", + "accept": false, + "reason": "Both mention Qwen3/MoE and hf5.x/v5 breakage, but one is about LoRA compatibility on qwen3_moe and the other about Qwen3VL MoE loading; different failure modes and likely different fixes." + }, + { + "left": "issue:44246", + "right": "issue:44496", + "accept": false, + "reason": "Unrelated bugs: import slowness vs. an unrecognized-model/config issue for a specific checkpoint." + }, + { + "left": "issue:36246", + "right": "issue:44351", + "accept": false, + "reason": "Both are import errors, but they involve different missing exports and different model subsystems; not the same bug." + }, + { + "left": "issue:43531", + "right": "issue:45362", + "accept": false, + "reason": "Qwen3-MoE sliding_window behavior and Qwen3.5 chat crashes are different code paths and symptoms." + }, + { + "left": "issue:44295", + "right": "issue:45397", + "accept": false, + "reason": "A position_ids buffer read error is not the same as gemma-4 zero3/from_pretrained behavior." + }, + { + "left": "issue:43526", + "right": "issue:45003", + "accept": false, + "reason": "BeitImageProcessorFast reduce_labels bug and unsafe sys.modules access in modeling_utils are unrelated." 
+ }, + { + "left": "issue:43994", + "right": "issue:45276", + "accept": false, + "reason": "SigLIP2 nonsensical outputs via AutoModel/pipeline and gemma4 embedding resizing are separate model-specific issues." + }, + { + "left": "issue:44336", + "right": "issue:44485", + "accept": false, + "reason": "ANSI code leakage in loading_report is unrelated to GLM-5 RoPE implementation." + }, + { + "left": "issue:43673", + "right": "issue:44561", + "accept": false, + "reason": "Both are v5 regressions, but one is about chunked_prefill cache handling and the other about removal of is_torch_fx_available; different breakages and fixes." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 92, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:36246", + "issue:42886", + "issue:43066", + "issue:43441", + "issue:43540", + "issue:43596", + "issue:44162", + "issue:44242", + "issue:44246", + "issue:44291", + "issue:44779", + "issue:44857", + "issue:44871", + "issue:44877", + "issue:45200", + "issue:45229", + "issue:45362", + "issue:45375" + ], + "soft_pairs": [ + "issue:44291|issue:45362", + "issue:44242|issue:44857", + "issue:43066|issue:44779", + "issue:44162|issue:45200", + "issue:36246|issue:43596", + "issue:44877|issue:45375", + "issue:42886|issue:45229", + "issue:43441|issue:43540", + "issue:44246|issue:44871" + ], + "prepared_review_unit_hash": "ef24e9141fda8460f192b9df806bce2c3b5627e3bd9378157212bdbb057601d7", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 
336448 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 9, + "serialized_chars": 13119, + "estimated_input_tokens": 3280, + "estimated_eval_tokens": 6816 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly a grab-bag of unrelated model-specific regressions and infra bugs. None of the soft pairs are strong duplicates, so I would not merge any of them into a single canonical issue.", + "confidence": 0.93, + "canonical_issue_reason": "No single canonical issue: the items span unrelated failures across tokenizers, config validation, multimodal models, training/runtime errors, and performance complaints.", + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44291", + "right": "issue:45362", + "accept": false, + "reason": "Both are crashes, but they affect different code paths and models: init_empty_weights/_is_hf_initialized versus Qwen3.5 chat generation. Similar symptom, not the same bug." + }, + { + "left": "issue:44242", + "right": "issue:44857", + "accept": false, + "reason": "Load-balancing loss omission in MoE routing and an AMP/CUDA crash in LwDetrImageLoss are unrelated defects in different subsystems." + }, + { + "left": "issue:43066", + "right": "issue:44779", + "accept": false, + "reason": "Both mention tokenizers in v5, but one is about decoder type handling while the other is DeepSeek tokenization correctness. Different model families and failure modes." + }, + { + "left": "issue:44162", + "right": "issue:45200", + "accept": false, + "reason": "ESM2 breakage and Gemma 4 mm_token_type_ids defaults are model-specific issues with different causes and fixes." 
+ }, + { + "left": "issue:36246", + "right": "issue:43596", + "accept": false, + "reason": "An ImportError for Qwen2_5_VLImageProcessor is unrelated to a deepspeed zero3/BertModel index error." + }, + { + "left": "issue:44877", + "right": "issue:45375", + "accept": false, + "reason": "Both involve strict config handling, but they concern different models and different concrete symptoms (load failure vs. field silently dropped). Too broad to treat as the same bug." + }, + { + "left": "issue:42886", + "right": "issue:45229", + "accept": false, + "reason": "Offline tokenizer cache loading and Gemma4 multi-GPU CUDA OOM are unrelated runtime problems." + }, + { + "left": "issue:43441", + "right": "issue:43540", + "accept": false, + "reason": "FlashAttention failure for Ministral-3 and a Qwen3OmniMoe video-processing ValueError are different code paths and not mergeable as one fix." + }, + { + "left": "issue:44246", + "right": "issue:44871", + "accept": false, + "reason": "Import-time slowness and an eos_token_id config inconsistency are unrelated issues." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 93, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:41628", + "issue:41950", + "issue:43065", + "issue:43232", + "issue:43262", + "issue:43408", + "issue:43901", + "issue:44246", + "issue:44295", + "issue:44336", + "issue:44496", + "issue:44749", + "issue:44779", + "issue:44995", + "issue:45003", + "issue:45200", + "issue:45292" + ], + "soft_pairs": [ + "issue:41628|issue:41950", + "issue:44995|issue:45200", + "issue:44779|issue:45292", + "issue:43901|issue:45003", + "issue:43232|issue:44336", + "issue:44295|issue:44749", + "issue:43065|issue:43408", + "issue:43262|issue:44246", + "issue:44496|issue:45003" + ], + "prepared_review_unit_hash": "a803da39c18a9c266373d1325325bc2b1bdd3616fd2bbc5f241c48832a705d56", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12543, + "estimated_input_tokens": 3136, + "estimated_eval_tokens": 6528 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The cluster is not a duplicate set; it is a collection of unrelated bug reports across different models, pipelines, and utility code that only share broad Transformers vocabulary.", + "confidence": 0.94, + "canonical_issue_reason": "No single canonical issue stands out because the items describe distinct bugs 
in different subsystems/models rather than the same underlying defect.", + "canonical_pr_reason": null, + "best_issue_reason": "No issue is a good global representative here; the titles and problem statements are too heterogeneous to safely collapse into one canonical bug report.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "Cannot import name 'AutoImageProcessor' from 'transformers'", + "right": "video-classification pipeline looks for image processors", + "accept": false, + "reason": "Related area, but different failure modes: one is an import/export problem, the other is pipeline logic selecting the wrong processor type." + }, + { + "left": "[Bug] GlmMoeDsa crashes on second forward pass \u2014 stale indexer cache", + "right": "[Gemma 4] mm_token_type_ids required for text-only fine-tuning - should default to zeros", + "accept": false, + "reason": "Completely different models and code paths; one is a cache/state bug, the other is a Gemma input-defaulting issue." + }, + { + "left": "Deepseek tokenizer produces incorrect results as of v5 (works in v4)", + "right": "resize_token_embeddings does not effect to output_embeddings", + "accept": false, + "reason": "Tokenizer correctness and embedding-resize behavior are separate bugs with no shared underlying change." + }, + { + "left": "TextClassificationPipeline docs still mention return_all_scores, but behavior differs", + "right": "modeling_utils unsafely accesses sys.modules[]", + "accept": false, + "reason": "Documentation mismatch versus import-time/module-access safety are unrelated." + }, + { + "left": "_update_model_kwargs_for_generation after sync_gpus when generation", + "right": "Some ANSI codes are generated by utils/loading_report even when not connected to terminal", + "accept": false, + "reason": "Different subsystems and symptoms; generation-state handling is unrelated to terminal-color output." 
+ }, + { + "left": "An error occurs when reading position_ids after registering it as a buffer.", + "right": "Transformer \u4ece4.57.3 \u5347\u7ea7\u52305.3.0 \u540e\u8fc7\u6ee4\u6570\u636e\u65f6\u957f\u53d8\u6162\u5341\u500d\u4ee5\u4e0a", + "accept": false, + "reason": "A tensor/buffer access error is not the same underlying issue as a performance regression after upgrade." + }, + { + "left": "Dummy `nn.Conv2d` in `Sam3PixelDecoder`", + "right": "Warning: You are using a model of type sam3_video to instantiate a model of type sam3_tracker", + "accept": false, + "reason": "Both touch SAM3, but one is an implementation placeholder and the other is a config/model-type mismatch warning." + }, + { + "left": "Audio processors: `apply_chat_template()` defaults to 16kHz sampling rate, even if the processor config sets a different value", + "right": "import transformers takes long sometimes", + "accept": false, + "reason": "Audio preprocessing default-rate handling is unrelated to slow import performance." + }, + { + "left": "ValueError: Unrecognized model in allenai/Olmo-Hybrid-Instruct-SFT-7B. Should have a `model_type` key in its config.json.", + "right": "modeling_utils unsafely accesses sys.modules[]", + "accept": false, + "reason": "One is a model config loading error, the other is an internal module lookup safety issue; they do not indicate the same bug." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 94, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:43257", + "issue:43295", + "issue:43475", + "issue:43540", + "issue:43653", + "issue:43673", + "issue:44060", + "issue:44077", + "issue:44368", + "issue:44466", + "issue:44734", + "issue:44749", + "issue:44869", + "issue:44977", + "issue:45137", + "issue:45292", + "issue:45335", + "issue:45356" + ], + "soft_pairs": [ + "issue:43257|issue:45137", + "issue:43475|issue:43540", + "issue:43295|issue:45292", + "issue:44060|issue:44077", + "issue:43673|issue:44734", + "issue:44869|issue:45356", + "issue:44368|issue:44977", + "issue:44466|issue:45335", + "issue:43653|issue:44749" + ], + "prepared_review_unit_hash": "7b0b8178b7c40298509f1e3a6beff283daa2d4c4dcfb6abb3c266d58d2225c2f", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 9, + "serialized_chars": 13408, + "estimated_input_tokens": 3352, + "estimated_eval_tokens": 6960 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These are mostly unrelated bug reports that only share broad subsystem keywords (tokenizers, embeddings, cache, DeepSpeed, Qwen). 
None of the soft pairs looks like the same underlying issue, so no duplicate merges are justified.", + "confidence": 0.95, + "canonical_issue_reason": "issue:44466 is the most representative standalone bug report: it has a clear regression framing, a concrete observable failure, and enough detail to diagnose tied-weight serialization behavior across versions/devices.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:44466 is the strongest issue to keep as the representative example because it is specific, reproducible, and narrowly scoped while still clearly describing the failure mode.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43257", + "right": "issue:45137", + "accept": false, + "reason": "Both mention DeepSpeed, but one is Qwen3 MOE weight conversion/loading and the other is a ZeRO3 deque IndexError; different failure paths." + }, + { + "left": "issue:43475", + "right": "issue:43540", + "accept": false, + "reason": "Different models and different bugs: missing SAM3 vision encoder attribute vs Qwen3OmniMoe video input ValueError." + }, + { + "left": "issue:43295", + "right": "issue:45292", + "accept": false, + "reason": "Both touch processor/tokenizer or embeddings, but one is a processor.tokenizer regression and the other is resize_token_embeddings not updating output embeddings." + }, + { + "left": "issue:44060", + "right": "issue:44077", + "accept": false, + "reason": "Unrelated model families and symptoms; a tied-weights warning in Qwen3-Next is not the same as patchtsmixer post_init gating." + }, + { + "left": "issue:43673", + "right": "issue:44734", + "accept": false, + "reason": "Both are cache-related, but one is GenerationMixin cache missing in chunked_prefill and the other is a serve-time KV continuation tensor indexing crash." 
+ }, + { + "left": "issue:44869", + "right": "issue:45356", + "accept": false, + "reason": "Both involve decoding/tokenization regressions, but they affect different models and different failure mechanisms." + }, + { + "left": "issue:44368", + "right": "issue:44977", + "accept": false, + "reason": "One is a tie_word_embeddings warning during Qwen3.5 LoRA fine-tuning; the other is a flash-attention generation failure. Not the same bug." + }, + { + "left": "issue:44466", + "right": "issue:45335", + "accept": false, + "reason": "Both concern embedding weights, but one is inconsistent lm_head serialization by device and the other is resize_token_embeddings not propagating to decoder.embed_tokens." + }, + { + "left": "issue:43653", + "right": "issue:44749", + "accept": false, + "reason": "A BigBirdTokenizer special-token bug and a performance regression in filtering after upgrade are unrelated." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 95, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:42673", + "issue:43232", + "issue:43381", + "issue:43441", + "issue:43606", + "issue:43646", + "issue:43673", + "issue:43704", + "issue:43931", + "issue:44162", + "issue:44297", + "issue:44368", + "issue:44442", + "issue:44623", + "issue:44749", + "issue:45278", + "issue:45292" + ], + "soft_pairs": [ + "issue:44442|issue:44749", + "issue:43232|issue:43673", + "issue:43931|issue:44368", + "issue:43441|issue:43606", + "issue:43673|issue:45278", + "issue:43646|issue:44162", + "issue:42673|issue:43704", + "issue:44297|issue:44623", + "issue:43381|issue:45292" + ], + "prepared_review_unit_hash": 
"1b1c16262ffe0636a47cf313090534a94cce0e4fbad008bf8148dcf511604eeb", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12506, + "estimated_input_tokens": 3127, + "estimated_eval_tokens": 6510 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This set is mostly heterogeneous. The only clear duplicate pair is the Qwen3ForCausalLM VRAM leak report (42673 \u2194 43704); the other soft pairs share broad Transformers areas but describe different bugs and should stay separate.", + "confidence": 0.92, + "canonical_issue_reason": "issue:43704 is the best canonical issue: it is the newer/open instance of the exact same Qwen3ForCausalLM VRAM leak reported in 42673, with identical title and symptom.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:43704 is the strongest representative of the only true duplicate pair in the set; the rest of the items are unrelated or only loosely similar.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44442", + "right": "issue:44749", + "accept": false, + "reason": "Both mention tokenizer/model behavior, but one is a FastSpeech2ConformerTokenizer loading failure and the other is a slowdown after upgrading Transformers; different bugs and symptoms." + }, + { + "left": "issue:43232", + "right": "issue:43673", + "accept": false, + "reason": "Both involve generation internals, but one is about _update_model_kwargs_for_generation after sync_gpus and the other is a missing GenerationMixin cache in chunked_prefill; not the same code-path problem." 
+ }, + { + "left": "issue:43931", + "right": "issue:44368", + "accept": false, + "reason": "Qwen3-VL weight-shape mismatch and a tie_word_embeddings warning during LoRA fine-tuning are distinct model/config issues, not duplicates." + }, + { + "left": "issue:43441", + "right": "issue:43606", + "accept": false, + "reason": "FlashAttention failure for Ministral-3 and CPU offload device-mismatch for bark-small are different model/backend failures." + }, + { + "left": "issue:43673", + "right": "issue:45278", + "accept": false, + "reason": "A missing cache during chunked_prefill is a specific generation bug; the other report is broad import errors after upgrading versions." + }, + { + "left": "issue:43646", + "right": "issue:44162", + "accept": false, + "reason": "Custom model initialization breakage in Transformers 5.0.0 is not the same as ESM2 being broken; different affected paths and symptoms." + }, + { + "left": "issue:42673", + "right": "issue:43704", + "accept": true, + "reason": "Same exact title and same VRAM leak in Qwen3ForCausalLM when used across multiple dataloader threads; clearly the same underlying bug." + }, + { + "left": "issue:44297", + "right": "issue:44623", + "accept": false, + "reason": "Tokenizer.save_pretrained writing the wrong tokenizer_class and processor.save_pretrained missing files are both serialization bugs but affect different objects and failure modes." + }, + { + "left": "issue:43381", + "right": "issue:45292", + "accept": false, + "reason": "Gradient checkpointing in eval mode and resize_token_embeddings not updating output embeddings are unrelated model API bugs." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 96, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:28282", + "issue:30990", + "issue:36246", + "issue:41628", + "issue:42371", + "issue:42898", + "issue:43381", + "issue:43452", + "issue:43475", + "issue:43824", + "issue:44162", + "issue:44933", + "issue:45003", + "issue:45081", + "issue:45092", + "issue:45103", + "issue:45335", + "issue:45381" + ], + "soft_pairs": [ + "issue:28282|issue:30990", + "issue:42898|issue:44162", + "issue:43381|issue:45335", + "issue:36246|issue:43824", + "issue:43452|issue:45003", + "issue:41628|issue:44933", + "issue:42371|issue:45092", + "issue:43475|issue:45381", + "issue:45081|issue:45103" + ], + "prepared_review_unit_hash": "f531cbfe87e1c77b0dd7c8fcbf49262a30095d4cbd2baef980de1374b4a4ebeb", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 9, + "serialized_chars": 13255, + "estimated_input_tokens": 3314, + "estimated_eval_tokens": 6884 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These items are mostly unrelated bugs; the only plausible duplicate pair is the two Qwen2.5-VL import/export failures, which likely share the same missing symbol export issue. 
The rest are different code paths or different features entirely.", + "confidence": 0.74, + "canonical_issue_reason": "Issue 43824 is the strongest representative of the one plausible duplicate theme: Qwen2.5-VL import/export breakage. It is also broader than the submodule-specific image-processor report.", + "canonical_pr_reason": null, + "best_issue_reason": "43824 is the best issue to anchor the cluster because it is the clearest, most central instance of the Qwen2.5-VL import regression and has the most downstream attention.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:28282", + "right": "issue:30990", + "accept": false, + "reason": "Different failures: missing PyTorch dependency vs. hanging Sentence Transformers load. Not the same underlying bug." + }, + { + "left": "issue:42898", + "right": "issue:44162", + "accept": false, + "reason": "A tokenizer cleanup deprecation/change issue is unrelated to an ESM2 model breakage." + }, + { + "left": "issue:43381", + "right": "issue:45335", + "accept": false, + "reason": "Gradient checkpointing in eval mode and t5gemma embedding resize are separate behaviors in different code paths." + }, + { + "left": "issue:36246", + "right": "issue:43824", + "accept": true, + "reason": "Both report Qwen2.5-VL import/export failures for missing symbols from transformers; this looks like the same broken export surface for the same model family." + }, + { + "left": "issue:43452", + "right": "issue:45003", + "accept": false, + "reason": "gguf_file/from_pretrained loading problems and unsafe sys.modules access are not the same concrete bug." + }, + { + "left": "issue:41628", + "right": "issue:44933", + "accept": false, + "reason": "Both are import-related, but they cite different missing symbols and different modules; too broad to be the same bug." 
+ }, + { + "left": "issue:42371", + "right": "issue:45092", + "accept": false, + "reason": "TF32 API guidance and InternVL2 meta-init incompatibility are unrelated issues." + }, + { + "left": "issue:43475", + "right": "issue:45381", + "accept": false, + "reason": "SAM3 missing attribute and Qwen2.5-VL video position-id mismatch are different model-specific bugs." + }, + { + "left": "issue:45081", + "right": "issue:45103", + "accept": false, + "reason": "Mistral regex patch crash and auto_docstring annotation crash affect different subsystems and failure modes." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 97, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:42222", + "issue:42994", + "issue:43232", + "issue:43475", + "issue:43525", + "issue:43643", + "issue:43873", + "issue:44038", + "issue:44188", + "issue:44623", + "issue:44661", + "issue:44945", + "issue:45200", + "issue:45230", + "issue:45310", + "issue:45362", + "issue:45375", + "issue:45397" + ], + "soft_pairs": [ + "issue:42222|issue:45200", + "issue:43475|issue:43643", + "issue:42994|issue:43525", + "issue:45310|issue:45375", + "issue:44038|issue:44623", + "issue:43873|issue:44945", + "issue:45230|issue:45397", + "issue:43475|issue:44661", + "issue:45362|issue:45375", + "issue:43232|issue:44188" + ], + "prepared_review_unit_hash": "554f57419acd39c8de2822115424480ac411ecda4c4b552f90496bd1951d7858", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + 
"node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13130, + "estimated_input_tokens": 3283, + "estimated_eval_tokens": 6822 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The cluster is mostly heterogeneous; only one pair appears to be the same underlying Qwen3.5 MoE config-loading bug. The rest are similar at a high level (missing fields, saving, quantization, generation) but not the same concrete issue.", + "confidence": 0.78, + "canonical_issue_reason": "issue:45375 is the most root-cause-specific report in the only likely duplicate pair: it names the missing `deepstack_visual_indexes` field and the `@strict` drop behavior, which likely explains the broader from_pretrained failure.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:45310 is the clearest user-facing symptom report for the Qwen3.5 MoE loading failure and includes the version context (`transformers>=5.4.0`), making it the best top-level representative issue.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:42222", + "right": "issue:45200", + "accept": false, + "reason": "Different models and different failure modes; one is a VitPose loading breakage, the other is Gemma 4 text-only finetuning needing default token type ids." + }, + { + "left": "issue:43475", + "right": "issue:43643", + "accept": false, + "reason": "Both involve missing attributes/fields, but the affected objects and code paths differ: SAM 3 video output vs AutoConfig remote-code loading." + }, + { + "left": "issue:42994", + "right": "issue:43525", + "accept": false, + "reason": "Quantized-model save failure is unrelated to a Llama4 config missing `pad_token_id`; these are not the same bug." 
+ }, + { + "left": "issue:45310", + "right": "issue:45375", + "accept": true, + "reason": "Both point to the same Qwen3.5 MoE config-loading/from_pretrained path; the missing `deepstack_visual_indexes` field is a plausible root cause of the broader from_pretrained error." + }, + { + "left": "issue:44038", + "right": "issue:44623", + "accept": false, + "reason": "Same broad area of model/processor handling, but the concrete problems differ: Qwen3-VL-Moe bug vs `processor.save_pretrained(...)` missing files." + }, + { + "left": "issue:43873", + "right": "issue:44945", + "accept": false, + "reason": "Quantization/offloading behavior and pipeline-parallel output correctness are different runtime problems and not the same underlying change or fix." + }, + { + "left": "issue:45230", + "right": "issue:45397", + "accept": false, + "reason": "`Bug report` is too generic to match the specific Gemma-4 ZeRO3 from_pretrained failure; no clear same-bug evidence." + }, + { + "left": "issue:43475", + "right": "issue:44661", + "accept": false, + "reason": "Both touch model/config plumbing, but one is a missing vision-model attribute and the other is a registry/mapping failure for add-new-model-like." + }, + { + "left": "issue:45362", + "right": "issue:45375", + "accept": false, + "reason": "Qwen3.5 chat crashes and a missing vision-config field are related only by model family, not by the same concrete bug." + }, + { + "left": "issue:43232", + "right": "issue:44188", + "accept": false, + "reason": "Generation kwarg updates after sync_gpus and attention-kernel divergence under torch.compile are unrelated code paths." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 98, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:30990", + "issue:35141", + "issue:36010", + "issue:41628", + "issue:42831", + "issue:42994", + "issue:43232", + "issue:43525", + "issue:43673", + "issue:44164", + "issue:44188", + "issue:44222", + "issue:44351", + "issue:44792", + "issue:45310", + "issue:45375", + "issue:45397" + ], + "soft_pairs": [ + "issue:41628|issue:44351", + "issue:44222|issue:45310", + "issue:42831|issue:44222", + "issue:42994|issue:44164", + "issue:36010|issue:41628", + "issue:43525|issue:45375", + "issue:45310|issue:45397", + "issue:43673|issue:44188", + "issue:30990|issue:35141", + "issue:43232|issue:44792" + ], + "prepared_review_unit_hash": "efe7471c6ccda5481ec6857e889a29de41c0bbfc1f0610ba7f4d698046fc3c15", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 10, + "serialized_chars": 12581, + "estimated_input_tokens": 3146, + "estimated_eval_tokens": 6548 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is not a duplicate set: the items span unrelated import errors, generation/cache bugs, config-field regressions, and serialization/saving issues. 
All soft edges should be rejected.", + "confidence": 0.96, + "canonical_issue_reason": "No single issue cleanly represents the cluster because the titles point to separate bugs; if forced, issue 44164 is the broadest and most detailed serialization-related report, but it is not a true duplicate hub.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 44164 is the strongest standalone anchor by specificity and diagnostic detail, but it should not be treated as canonical for the whole cluster since the rest are unrelated.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:41628", + "right": "issue:44351", + "accept": false, + "reason": "Both are import errors, but they involve different symbols (`AutoImageProcessor` vs `HybridCache`) and likely different breakages." + }, + { + "left": "issue:44222", + "right": "issue:45310", + "accept": false, + "reason": "Both mention MoE/from_pretrained or saving/loading, but one is an FP8 save_pretrained bug and the other is a Qwen3.5 from_pretrained error; different code paths." + }, + { + "left": "issue:42831", + "right": "issue:44222", + "accept": false, + "reason": "One is an FP8 accuracy regression, the other is an FP8 save_pretrained failure; same broad area, not the same bug." + }, + { + "left": "issue:42994", + "right": "issue:44164", + "accept": false, + "reason": "Both concern model saving/loading, but `quantized model saving failed` and `extra_state` handling are distinct serialization failures." + }, + { + "left": "issue:36010", + "right": "issue:41628", + "accept": false, + "reason": "Different import targets (`GenerationMixin` vs `AutoImageProcessor`) and different failure points." + }, + { + "left": "issue:43525", + "right": "issue:45375", + "accept": false, + "reason": "Both are config/schema issues, but they involve different models and different missing fields; not the same underlying defect." 
+ }, + { + "left": "issue:45310", + "right": "issue:45397", + "accept": false, + "reason": "Both are `from_pretrained` errors, but they affect different model families and likely different loaders/failure causes." + }, + { + "left": "issue:43673", + "right": "issue:44188", + "accept": false, + "reason": "One is a missing GenerationMixin cache in chunked prefill, the other is attention-kernel divergence under `torch.compile`; unrelated code paths." + }, + { + "left": "issue:30990", + "right": "issue:35141", + "accept": false, + "reason": "A loading hang for Sentence Transformers is unrelated to embedding reinitialization after resizing token embeddings." + }, + { + "left": "issue:43232", + "right": "issue:44792", + "accept": false, + "reason": "Generation kwargs handling after `sync_gpus` is unrelated to a Janus image-generation test failure." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 99, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:42175", + "issue:42898", + "issue:43377", + "issue:43479", + "issue:43784", + "issue:43824", + "issue:43976", + "issue:44038", + "issue:44060", + "issue:44079", + "issue:44188", + "issue:44242", + "issue:44373", + "issue:44568", + "issue:44869", + "issue:44933", + "issue:45125", + "issue:45310" + ], + "soft_pairs": [ + "issue:43377|issue:44242", + "issue:43479|issue:44079", + "issue:44038|issue:45310", + "issue:42175|issue:44933", + "issue:43479|issue:44869", + "issue:42898|issue:44568", + "issue:43976|issue:44933", + "issue:43784|issue:43824", + "issue:44188|issue:44373", + "issue:44060|issue:45125" + ], + "prepared_review_unit_hash": 
"3d2d262e8e1d3caf6c2045c51467bf557b6ea294c57bc0acd5d15833b0dce755", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13415, + "estimated_input_tokens": 3354, + "estimated_eval_tokens": 6964 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The cluster is heterogeneous: none of the soft pairs appear to be the same underlying bug or change, so there is no clear duplicate group or canonical artifact here.", + "confidence": 0.93, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43377", + "right": "issue:44242", + "accept": false, + "reason": "Different failure modes in different model paths: MIMI batching/padding-mask output divergence vs MoE router load-balancing loss gating." + }, + { + "left": "issue:43479", + "right": "issue:44079", + "accept": false, + "reason": "Both mention None-handling, but one is multimodal config default initialization and the other is ModelOutput key assignment logic; not the same code path." + }, + { + "left": "issue:44038", + "right": "issue:45310", + "accept": false, + "reason": "Both involve Qwen VL/MoE models, but the reported breakages are on different model variants and different version ranges, with no clear shared root cause." + }, + { + "left": "issue:42175", + "right": "issue:44933", + "accept": false, + "reason": "Unrelated topics: pip extras/backend dependency installation vs a missing import from image_utils." 
+ }, + { + "left": "issue:43479", + "right": "issue:44869", + "accept": false, + "reason": "Config defaulting bug in Phi4MultimodalConfig vs Whisper word-timestamp decode crash; unrelated subsystems." + }, + { + "left": "issue:42898", + "right": "issue:44568", + "accept": false, + "reason": "Both are tokenizer/behavior regressions, but one is about clean_up_tokenization_spaces and the other about add_special_tokens/BOS-EOS insertion in a specific tokenizer." + }, + { + "left": "issue:43976", + "right": "issue:44933", + "accept": false, + "reason": "Python version compatibility issue vs missing image_utils import; no shared bug." + }, + { + "left": "issue:43784", + "right": "issue:43824", + "accept": false, + "reason": "Import-time failures with different causes: missing nn in sentence-transformers integration vs missing Qwen2.5 VL export." + }, + { + "left": "issue:44188", + "right": "issue:44373", + "accept": false, + "reason": "torch.compile attention-kernel divergence is unrelated to a wrong docstring for position_ids." + }, + { + "left": "issue:44060", + "right": "issue:45125", + "accept": false, + "reason": "Both touch Qwen3-family models, but one is a tied-weights warning and the other is missing tensor-parallel plan metadata; different defects." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 100, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:42898", + "issue:43278", + "issue:43723", + "issue:43874", + "issue:44077", + "issue:44242", + "issue:44448", + "issue:44568", + "issue:44623", + "issue:44734", + "issue:44779", + "issue:44964", + "issue:45092", + "issue:45103", + "issue:45137", + "issue:45276", + "issue:45310", + "issue:45356" + ], + "soft_pairs": [ + "issue:44964|issue:45137", + "issue:44734|issue:45103", + "issue:43723|issue:44568", + "issue:42898|issue:45356", + "issue:43278|issue:45137", + "issue:44623|issue:45310", + "issue:43874|issue:45092", + "issue:42898|issue:44779", + "issue:44077|issue:44242", + "issue:42898|issue:44448", + "issue:45137|issue:45276" + ], + "prepared_review_unit_hash": "2dfb78da1cd958b8bd019f072869ce95fce0f410a177e9e289db1409ee60b2c9", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 11, + "serialized_chars": 13855, + "estimated_input_tokens": 3464, + "estimated_eval_tokens": 7184 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These are mostly unrelated regression reports grouped only by broad Transformers v5 context; I don\u2019t see any true duplicate pairs among the soft candidates. 
The closest thing to a cluster-wide umbrella is the general v5 behavior-change issue, but it is not a duplicate of the others.", + "confidence": 0.31, + "canonical_issue_reason": "issue:42898 is the broadest umbrella for v5 behavior changes, but it does not match the specific bugs in the other issues.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:42898 is the best representative only in the sense that it is the most general v5 regression umbrella; it is not a precise duplicate target.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44964", + "right": "issue:45137", + "accept": false, + "reason": "Different failures: model loading for Phi-4 multimodal vs DeepSpeed ZeRO3 deque IndexError." + }, + { + "left": "issue:44734", + "right": "issue:45103", + "accept": false, + "reason": "Serve/KV-cache tensor indexing bug and auto-docstring annotations crash are unrelated code paths." + }, + { + "left": "issue:43723", + "right": "issue:44568", + "accept": false, + "reason": "Both are tokenizer regressions, but for different models and different symptoms; not the same underlying bug." + }, + { + "left": "issue:42898", + "right": "issue:45356", + "accept": false, + "reason": "Generic v5 tokenizer behavior change vs Kimi-K2.5 codec/regex regression; too different to merge." + }, + { + "left": "issue:43278", + "right": "issue:45137", + "accept": false, + "reason": "Embedding dtype mismatch in eval is unrelated to ZeRO3 deque underflow." + }, + { + "left": "issue:44623", + "right": "issue:45310", + "accept": false, + "reason": "Processor save-pretrained file omissions and Qwen3.5 from_pretrained failure are different load/save problems." + }, + { + "left": "issue:43874", + "right": "issue:45092", + "accept": false, + "reason": "Missing image-patch method and remote-code/meta-init incompatibility are distinct multimodal issues." 
+ }, + { + "left": "issue:42898", + "right": "issue:44779", + "accept": false, + "reason": "Both mention tokenizer behavior in v5, but they concern different tokenizers and different regressions." + }, + { + "left": "issue:44077", + "right": "issue:44242", + "accept": false, + "reason": "PatchTSMixer post_init API restriction and MoE router-loss omission are unrelated model behavior changes." + }, + { + "left": "issue:42898", + "right": "issue:44448", + "accept": false, + "reason": "A broad v5 output-change report is not the same bug as Pegasus-specific output divergence." + }, + { + "left": "issue:45137", + "right": "issue:45276", + "accept": false, + "reason": "ZeRO3 deque error and Gemma4 resize_token_embeddings propagation bug do not share a concrete code path." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 101, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:42831", + "issue:42898", + "issue:43295", + "issue:43479", + "issue:43644", + "issue:43723", + "issue:44206", + "issue:44222", + "issue:44263", + "issue:44297", + "issue:44360", + "issue:44483", + "issue:44484", + "issue:44623", + "issue:44977", + "issue:45103", + "issue:45406" + ], + "soft_pairs": [ + "issue:42898|issue:43295", + "issue:43479|issue:43644", + "issue:44206|issue:45103", + "issue:44222|issue:44297", + "issue:44222|issue:44484", + "issue:44483|issue:45406", + "issue:42831|issue:44623", + "issue:44222|issue:44623", + "issue:44263|issue:44360", + "issue:43723|issue:44977" + ], + "prepared_review_unit_hash": "9ec99ff313efef4311d5d6491977aeb5786401337ff0c8289994bfa5e551423a", + "component_budget": { + 
"node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 10, + "serialized_chars": 12755, + "estimated_input_tokens": 3189, + "estimated_eval_tokens": 6634 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "All soft links look like false positives: the issues share broad terms like v5, tokenizer, or save_pretrained, but they describe different concrete regressions and code paths. No single duplicate canonical emerges from this set.", + "confidence": 0.93, + "canonical_issue_reason": "No clear canonical issue: the cluster is heterogeneous across tokenizer behavior, multimodal config, serve/runtime crashes, FP8 serialization, and DSA indexer bugs.", + "canonical_pr_reason": null, + "best_issue_reason": "If forced to pick the closest hub, issue:44623 is the most general save_pretrained serialization report, but it still does not subsume the other bugs.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:42898", + "right": "issue:43295", + "accept": false, + "reason": "Both are v5/tokenizer regressions, but one is clean_up_tokenization_spaces behavior and the other is processor.tokenizer/image handling; different bugs." + }, + { + "left": "issue:43479", + "right": "issue:43644", + "accept": false, + "reason": "Phi4MultimodalConfig None-handling is unrelated to non-persistent buffer junk during v5 model loading." + }, + { + "left": "issue:44206", + "right": "issue:45103", + "accept": false, + "reason": "LasrFeatureExtractor center-arg crash and _process_kwargs_parameters AttributeError are separate code paths with different failure modes." 
+ }, + { + "left": "issue:44222", + "right": "issue:44297", + "accept": false, + "reason": "FP8 MoE save_pretrained bug and tokenizer_class mismatch in tokenizer_config.json are both save_pretrained-related but not the same underlying defect." + }, + { + "left": "issue:44222", + "right": "issue:44484", + "accept": false, + "reason": "One concerns FP8 MoE model saving; the other is a max_shard_size default question. Same API, different issue." + }, + { + "left": "issue:44483", + "right": "issue:45406", + "accept": false, + "reason": "OpenAI-style /v1/chat/completions request rejection is unrelated to Gemma4Processor missing _tokenizer in serve." + }, + { + "left": "issue:42831", + "right": "issue:44623", + "accept": false, + "reason": "FineGrainedFP8 accuracy loss is a model-quality bug, while processor.save_pretrained missing files is a serialization output bug." + }, + { + "left": "issue:44222", + "right": "issue:44623", + "accept": false, + "reason": "Both mention save_pretrained, but FP8 MoE saving and processor file omission are different concrete failures." + }, + { + "left": "issue:44263", + "right": "issue:44360", + "accept": false, + "reason": "Both involve the DSA indexer, but torch.split return values and missing ReLU describe distinct fixes." + }, + { + "left": "issue:43723", + "right": "issue:44977", + "accept": false, + "reason": "Tokenizer loading in v5 and Qwen3.5 flash-attention generation failure are different regressions despite both affecting model use." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 102, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:22355", + "issue:28282", + "issue:43479", + "issue:43606", + "issue:43723", + "issue:44261", + "issue:44263", + "issue:44297", + "issue:44373", + "issue:44485", + "issue:44514", + "issue:44623", + "issue:44871", + "issue:44913", + "issue:44991", + "issue:45092", + "issue:45103", + "issue:45310" + ], + "soft_pairs": [ + "issue:22355|issue:28282", + "issue:44263|issue:45092", + "issue:44514|issue:45103", + "issue:44297|issue:45310", + "issue:43606|issue:44623", + "issue:43479|issue:44913", + "issue:43723|issue:44871", + "issue:44261|issue:45092", + "issue:43723|issue:44991", + "issue:44373|issue:44485" + ], + "prepared_review_unit_hash": "3b660638696dac22ad3db95f2a95cca35035851f6819f44c2f3c7d3975404b29", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13425, + "estimated_input_tokens": 3357, + "estimated_eval_tokens": 6970 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The items are mostly unrelated issues with only superficial topical overlap. All soft-edge pairs should be rejected; none appear to describe the same underlying bug or fixable code path. 
The best representative issue is the broad tokenizer-loading regression in Transformers v5.", + "confidence": 0.86, + "canonical_issue_reason": "issue:43723 is the broadest and most central regression report in the set: it describes a v5 tokenizer loading failure at the core AutoTokenizer.from_pretrained path, which is more representative than the model-specific or config-specific reports.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:43723 is the best single issue to anchor the set because it is a clear, general regression with a core API surface and likely relates to multiple downstream tokenizer-loading complaints.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:22355", + "right": "issue:28282", + "accept": false, + "reason": "Both are import errors, but they fail for different missing components: transformers.onnx vs PyTorch for AutoModel. Not the same bug or code path." + }, + { + "left": "issue:44263", + "right": "issue:45092", + "accept": false, + "reason": "Different model families and different failure modes: torch.split return handling in GLM MoE indexing vs meta-init incompatibility in InternVL2." + }, + { + "left": "issue:44514", + "right": "issue:45103", + "accept": false, + "reason": "One is a processor batching crash in Qwen2.5-VL; the other is an auto-docstring AttributeError triggered by future annotations. Unrelated code paths." + }, + { + "left": "issue:44297", + "right": "issue:45310", + "accept": false, + "reason": "Both mention tokenizer/model loading, but one is save_pretrained metadata mismatch and the other is a Qwen3.5 MoE from_pretrained regression. Too different to treat as the same fix." + }, + { + "left": "issue:43606", + "right": "issue:44623", + "accept": false, + "reason": "CPU offload device mismatch for bark-small is unrelated to processor.save_pretrained missing files." 
+ }, + { + "left": "issue:43479", + "right": "issue:44913", + "accept": false, + "reason": "Both concern config round-tripping/defaults, but they involve different model configs and different symptoms; not the same underlying bug." + }, + { + "left": "issue:43723", + "right": "issue:44871", + "accept": false, + "reason": "Tokenizer loading regression vs eos_token_id config mismatch; related to model loading broadly, but not the same concrete issue." + }, + { + "left": "issue:44261", + "right": "issue:45092", + "accept": false, + "reason": "Different models and failures: missing rms_norm_eps precision issue vs old InternVL2 remote-code/meta-init incompatibility." + }, + { + "left": "issue:43723", + "right": "issue:44991", + "accept": false, + "reason": "Both are tokenizer-loading complaints, but one is a general v5 AutoTokenizer regression and the other is a specific EMBEDDIA/est-roberta failure. Not enough evidence they are the same bug." + }, + { + "left": "issue:44373", + "right": "issue:44485", + "accept": false, + "reason": "Docstring text error vs RoPE implementation discussion. They are not the same bug or change." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 103, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:30990", + "issue:38175", + "issue:41628", + "issue:42831", + "issue:42886", + "issue:42994", + "issue:43576", + "issue:43723", + "issue:43784", + "issue:43824", + "issue:44263", + "issue:44295", + "issue:44373", + "issue:44442", + "issue:44623", + "issue:45092", + "issue:45310", + "issue:45357" + ], + "soft_pairs": [ + "issue:45092|issue:45357", + "issue:42886|issue:42994", + "issue:43723|issue:44442", + "issue:30990|issue:38175", + "issue:41628|issue:43824", + "issue:44295|issue:44623", + "issue:42831|issue:45310", + "issue:43576|issue:43784", + "issue:44263|issue:44373" + ], + "prepared_review_unit_hash": "5db36c7429d1d011386745237a70d72ba31eb62d46c9d3675039734a5927bc8a", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 9, + "serialized_chars": 12891, + "estimated_input_tokens": 3223, + "estimated_eval_tokens": 6702 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These are superficially similar Transformers-related reports, but the soft pairs point to different bugs/code paths rather than duplicates. 
I would keep them separate.", + "confidence": 0.82, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:45092", + "right": "issue:45357", + "accept": false, + "reason": "Both involve Qwen/vision checkpoint handling, but one is meta-init incompatibility for old InternVL2 remote-code checkpoints and the other is a save_pretrained regression with wrong visual encoder keys; different failure modes." + }, + { + "left": "issue:42886", + "right": "issue:42994", + "accept": false, + "reason": "Offline tokenizer cache loading and quantized model saving are unrelated code paths and symptoms." + }, + { + "left": "issue:43723", + "right": "issue:44442", + "accept": false, + "reason": "Both are tokenizer-loading problems, but they affect different tokenizer classes and likely distinct registration/load issues." + }, + { + "left": "issue:30990", + "right": "issue:38175", + "accept": false, + "reason": "Sentence-Transformers loading hang and zero probabilities on SigLIP2 are unrelated model/runtime bugs." + }, + { + "left": "issue:41628", + "right": "issue:43824", + "accept": false, + "reason": "Both are import errors, but for different missing symbols and different model families; not the same underlying bug." + }, + { + "left": "issue:44295", + "right": "issue:44623", + "accept": false, + "reason": "Position_ids buffer access and processor.save_pretrained missing files are separate issues in different parts of the stack." + }, + { + "left": "issue:42831", + "right": "issue:45310", + "accept": false, + "reason": "FineGrainedFP8 accuracy regression and Qwen3.5 from_pretrained failure are not the same problem." + }, + { + "left": "issue:43576", + "right": "issue:43784", + "accept": false, + "reason": "Broken env command in v5 and sentence-transformers import NameError are different regressions, even if both mention v5." 
+ }, + { + "left": "issue:44263", + "right": "issue:44373", + "accept": false, + "reason": "A torch.split shape/return-value issue in GlmMoeDsaIndexer is unrelated to a docstring typo about position_ids." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 104, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:30990", + "issue:33357", + "issue:42371", + "issue:42831", + "issue:42886", + "issue:42907", + "issue:43066", + "issue:43122", + "issue:43232", + "issue:44038", + "issue:44117", + "issue:44263", + "issue:44514", + "issue:44521", + "issue:44704", + "issue:44945", + "issue:45245", + "issue:45290" + ], + "soft_pairs": [ + "issue:43122|issue:44521", + "issue:42886|issue:43066", + "issue:42831|issue:44038", + "issue:44117|issue:44704", + "issue:30990|issue:33357", + "issue:43122|issue:44514", + "issue:42371|issue:44263", + "issue:42907|issue:44945", + "issue:44704|issue:45290", + "issue:43232|issue:45245" + ], + "prepared_review_unit_hash": "9f2b188a344c7bf4cff9328dda1eac8545ab96edec30b56d1e8bd5c6241afe54", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13155, + "estimated_input_tokens": 3289, + "estimated_eval_tokens": 6834 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These issues are mostly unrelated one-off bugs across 
tokenization, chat templating, generation, processor loading, and model-specific regressions. There isn\u2019t a strong duplicate cluster here, so no soft pair should be merged.", + "confidence": 0.84, + "canonical_issue_reason": "If one issue must anchor the set, 44521 is the strongest representative because it is active, has the most inbound references, and describes a concrete `apply_chat_template` multimodal failure with clear reproduction context.", + "canonical_pr_reason": null, + "best_issue_reason": "44521 is the best standalone issue to keep visible: it is open, specific, and likely to attract/collect follow-up reports around the same chat-template path.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43122", + "right": "issue:44521", + "accept": false, + "reason": "Both involve tokenization/chat-template behavior, but one is a version-to-version tokenization change and the other is zeroed assistant masks for multimodal inputs; different bugs and code paths." + }, + { + "left": "issue:42886", + "right": "issue:43066", + "accept": false, + "reason": "Both are tokenizer-related, but one is offline cache loading failure and the other is wrong tokenizer decoder type in v5; not the same underlying defect." + }, + { + "left": "issue:42831", + "right": "issue:44038", + "accept": false, + "reason": "These concern different model/precision issues: FineGrainedFP8 accuracy vs a Qwen3-VL-Moe bug in Transformers 5.0." + }, + { + "left": "issue:44117", + "right": "issue:44704", + "accept": false, + "reason": "One is a tokenizer mapping assumption in `from_pretrained`; the other is `AutoProcessor.from_pretrained` dropping kwargs to `cached_file`. Similar area, different failure mode." + }, + { + "left": "issue:30990", + "right": "issue:33357", + "accept": false, + "reason": "Completely different user-facing failures: Sentence Transformers loading hang vs MacOS bus error with a CLIP model." 
+ }, + { + "left": "issue:43122", + "right": "issue:44514", + "accept": false, + "reason": "Version-dependent tokenization mismatch and a batched `apply_chat_template` crash with `padding=False` are distinct bugs." + }, + { + "left": "issue:42371", + "right": "issue:44263", + "accept": false, + "reason": "TF32 configuration guidance and a `torch.split()` return-value issue in a model-specific indexer are unrelated." + }, + { + "left": "issue:42907", + "right": "issue:44945", + "accept": false, + "reason": "Saving dequantized models and incorrect pipeline-parallel output are different subsystems and failure modes." + }, + { + "left": "issue:44704", + "right": "issue:45290", + "accept": false, + "reason": "Both touch processor/chat-template flows, but one is missing kwargs in `cached_file` while the other is a crash on assistant messages with tool calls and no content." + }, + { + "left": "issue:43232", + "right": "issue:45245", + "accept": false, + "reason": "Generation sync/update ordering and a categorical cardinality limit error are unrelated." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 105, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:39401", + "issue:41628", + "issue:42907", + "issue:43116", + "issue:43122", + "issue:43232", + "issue:43408", + "issue:43475", + "issue:43937", + "issue:43976", + "issue:44488", + "issue:44625", + "issue:44743", + "issue:44779", + "issue:44829", + "issue:44945", + "issue:45200", + "issue:45278" + ], + "soft_pairs": [ + "issue:43976|issue:44945", + "issue:43232|issue:44743", + "issue:39401|issue:44488", + "issue:41628|issue:45278", + "issue:42907|issue:43937", + "issue:44625|issue:44829", + "issue:43116|issue:45200", + "issue:43408|issue:43475", + "issue:43122|issue:44779" + ], + "prepared_review_unit_hash": "8769954b511fd9595d08fadf5e939a7d8f86ec7c89120dd3f4e88296acc5cde8", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 9, + "serialized_chars": 13031, + "estimated_input_tokens": 3258, + "estimated_eval_tokens": 6772 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "No soft pair looks like the same underlying bug/change; the cluster is mostly a set of unrelated model-, tokenizer-, import-, and generation-regression reports. 
I\u2019d use the broad upgrade/import regression as the most representative issue, but only as a loose anchor, not a true duplicate target.", + "confidence": 0.92, + "canonical_issue_reason": "issue:45278 is the broadest, most general user-facing regression report in the set (multiple import failures after upgrading), so it is the best representative issue if a single anchor is needed.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:45278 is the most general and highest-level report here; the others are narrower model- or feature-specific bugs that do not unify the cluster.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43976", + "right": "issue:44945", + "accept": false, + "reason": "Both are runtime regressions, but one is a Python-version incompatibility/import issue and the other is an incorrect-generation-output issue; different code paths and symptoms." + }, + { + "left": "issue:43232", + "right": "issue:44743", + "accept": false, + "reason": "Both involve generation state, but one is about sync_gpus updating kwargs and the other is about recurrent states being reset in modular Qwen3.5; not the same bug." + }, + { + "left": "issue:39401", + "right": "issue:44488", + "accept": false, + "reason": "Both mention tokenizer-related problems, but one is wrong offset mappings for Qwen3 and the other is failing to load a specific model; too different in concrete failure mode." + }, + { + "left": "issue:41628", + "right": "issue:45278", + "accept": false, + "reason": "45278 is a broad upgrade/import regression, but 41628 is a specific missing AutoImageProcessor import; similar theme, not the same underlying bug." + }, + { + "left": "issue:42907", + "right": "issue:43937", + "accept": false, + "reason": "Dequantized model save failure and invalid GenerationConfig are unrelated subsystems with different failure signatures." 
+ }, + { + "left": "issue:44625", + "right": "issue:44829", + "accept": false, + "reason": "One is a config propagation bug in Qwen3.5 classification setup; the other is a training degeneration with flash_attention_3. Different bugs and likely different fixes." + }, + { + "left": "issue:43116", + "right": "issue:45200", + "accept": false, + "reason": "Example-script multi-label output handling and Gemma 4 multimodal token-type defaults are unrelated training/setup issues." + }, + { + "left": "issue:43408", + "right": "issue:43475", + "accept": false, + "reason": "Both are SAM 3 video related, but one is a model-type mismatch warning and the other is a missing attribute on the vision encoder output; not the same code-path problem." + }, + { + "left": "issue:43122", + "right": "issue:44779", + "accept": false, + "reason": "Both are tokenizer regressions in v5, but they affect different model families and different tokenization behaviors; not mergeable as one duplicate." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 106, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:29127", + "issue:30990", + "issue:39401", + "issue:41628", + "issue:42371", + "issue:42831", + "issue:43550", + "issue:43992", + "issue:44261", + "issue:44295", + "issue:44297", + "issue:44360", + "issue:44373", + "issue:44485", + "issue:44521", + "issue:44704", + "issue:44987" + ], + "soft_pairs": [ + "issue:43992|issue:44704", + "issue:44360|issue:44485", + "issue:29127|issue:30990", + "issue:44295|issue:44297", + "issue:41628|issue:44987", + "issue:39401|issue:44521", + "issue:44261|issue:44373", + 
"issue:42371|issue:42831", + "issue:43550|issue:44295" + ], + "prepared_review_unit_hash": "80cbf3b7c6345fbfbb036cca55c2ae3048550754b716b6307258ffc1d55cabec", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12329, + "estimated_input_tokens": 3083, + "estimated_eval_tokens": 6422 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These items do not form a coherent duplicate cluster: they span unrelated issues in loading, tokenization, model internals, configuration, docs, and runtime behavior. None of the soft pairs look like the same underlying bug or change.", + "confidence": 0.97, + "canonical_issue_reason": "issue:44521 is the most discussion-heavy open issue and the closest thing to a cluster anchor, but the overall set is heterogeneous, so this is an arbitrary representative rather than a true canonical duplicate.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:44521 is the best global issue candidate by activity and inbound references, but it should be treated only as a stand-in since the surrounding issues are unrelated.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43992", + "right": "issue:44704", + "accept": false, + "reason": "Both involve loading/from_pretrained, but one is a missing model weight in UMT5Encoder and the other is AutoProcessor kwargs passing to cached_file; different code paths and failures." + }, + { + "left": "issue:44360", + "right": "issue:44485", + "accept": false, + "reason": "DSA indexer/ReLU and GLM-5 RoPE implementation are unrelated model internals with no shared bug mechanism." 
+ }, + { + "left": "issue:29127", + "right": "issue:30990", + "accept": false, + "reason": "One is a LayoutLMv3 box-annotation error message issue, the other is Sentence Transformers hanging on load; not the same problem." + }, + { + "left": "issue:44295", + "right": "issue:44297", + "accept": false, + "reason": "Reading a position_ids buffer and a tokenizer_class mismatch in tokenizer_config.json are distinct tokenizer/model serialization bugs." + }, + { + "left": "issue:41628", + "right": "issue:44987", + "accept": false, + "reason": "AutoImageProcessor import failure and a transformers>=5.1.0 model load failure are different loading issues affecting different APIs." + }, + { + "left": "issue:39401", + "right": "issue:44521", + "accept": false, + "reason": "Wrong offset_mapping and zero assistant_masks both touch tokenization/template logic, but they affect different outputs and code paths." + }, + { + "left": "issue:44261", + "right": "issue:44373", + "accept": false, + "reason": "Missing rms_norm_eps causing precision error is a runtime config bug; wrong docstring for position_ids is documentation only." + }, + { + "left": "issue:42371", + "right": "issue:42831", + "accept": false, + "reason": "TF32 behavior settings and FineGrainedFP8 accuracy are separate numerical-precision concerns with different fixes." + }, + { + "left": "issue:43550", + "right": "issue:44295", + "accept": false, + "reason": "torch.compile/SDPA failure in Bamba-9B-v2 is unrelated to a position_ids buffer read error." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 107, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:36296", + "issue:42222", + "issue:42907", + "issue:43116", + "issue:43122", + "issue:43278", + "issue:43441", + "issue:43452", + "issue:43824", + "issue:43927", + "issue:44038", + "issue:44222", + "issue:44295", + "issue:44297", + "issue:44351", + "issue:44912", + "issue:44945", + "issue:44977" + ], + "soft_pairs": [ + "issue:42907|issue:43452", + "issue:36296|issue:43116", + "issue:43927|issue:44295", + "issue:44038|issue:44297", + "issue:43824|issue:44351", + "issue:44038|issue:44222", + "issue:43122|issue:44977", + "issue:43278|issue:44945", + "issue:42222|issue:43441", + "issue:42907|issue:44222", + "issue:42907|issue:44912" + ], + "prepared_review_unit_hash": "e912f194997e7789c0a0fe328a6eba2548748aa35d9575eea8f60cac323270fa", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 11, + "serialized_chars": 13532, + "estimated_input_tokens": 3383, + "estimated_eval_tokens": 7022 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is noisy and heterogeneous: it mixes unrelated bugs across tokenization, save/load, quantization, imports, FlashAttention, parallelism, and model-specific regressions. 
I do not see a single underlying duplicate issue or PR to canonicalize.", + "confidence": 0.88, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:42907", + "right": "issue:43452", + "accept": false, + "reason": "Different failure modes: saving dequantized Ministral/Devstral weights vs gguf_file breaking AutoTokenizer/AutoModelForCausalLM loading." + }, + { + "left": "issue:36296", + "right": "issue:43116", + "accept": false, + "reason": "Tensor-parallel training bug and multi-label classification example returning empty results are unrelated." + }, + { + "left": "issue:43927", + "right": "issue:44295", + "accept": false, + "reason": "One is custom token IDs lost on save/load; the other is position_ids access after registering as a buffer. Different code paths and symptoms." + }, + { + "left": "issue:44038", + "right": "issue:44297", + "accept": false, + "reason": "Qwen3-VL-Moe / Transformers 5.0 compatibility is not the same bug as tokenizer_class mismatch in tokenizer_config.json." + }, + { + "left": "issue:43824", + "right": "issue:44351", + "accept": false, + "reason": "Both are import errors, but for different symbols and likely different export/packaging issues." + }, + { + "left": "issue:44038", + "right": "issue:44222", + "accept": false, + "reason": "Both mention MoE, but one is a model/version compatibility bug and the other is FP8 save_pretrained behavior; not the same concrete issue." + }, + { + "left": "issue:43122", + "right": "issue:44977", + "accept": false, + "reason": "Tokenizer behavior changed across versions vs Qwen3.5 flash-attention generation failure; different subsystems and symptoms." + }, + { + "left": "issue:43278", + "right": "issue:44945", + "accept": false, + "reason": "Embedding dtype mismatch in evaluate vs incorrect output under pipeline parallelism are distinct runtime bugs." 
+ }, + { + "left": "issue:42222", + "right": "issue:43441", + "accept": false, + "reason": "VitPose model breakage and Ministral-3 FlashAttention failure are model-specific and unrelated." + }, + { + "left": "issue:42907", + "right": "issue:44222", + "accept": false, + "reason": "Both concern model saving/quantization, but dequantized save failure and FP8 save_pretrained are different concrete bugs." + }, + { + "left": "issue:42907", + "right": "issue:44912", + "accept": false, + "reason": "Dequantized Ministral/Devstral save failure is not the same as MXFP4 loading falling back to bf16." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 108, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:22355", + "issue:29127", + "issue:38617", + "issue:42371", + "issue:42673", + "issue:43064", + "issue:43116", + "issue:43493", + "issue:43673", + "issue:43784", + "issue:43874", + "issue:43937", + "issue:44075", + "issue:44485", + "issue:44933", + "issue:44936", + "issue:45072", + "issue:45278" + ], + "soft_pairs": [ + "issue:38617|issue:45278", + "issue:43784|issue:44933", + "issue:43064|issue:43673", + "issue:42673|issue:45072", + "issue:43937|issue:44485", + "issue:44075|issue:44936", + "issue:43116|issue:43493", + "issue:42371|issue:43874", + "issue:22355|issue:29127" + ], + "prepared_review_unit_hash": "e3b952252f22f8f1d26d07d11627f0e376d8e7b6f4510498c6a5147d27cada67", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + 
"node_count": 18, + "item_count": 18, + "soft_pair_count": 9, + "serialized_chars": 13041, + "estimated_input_tokens": 3261, + "estimated_eval_tokens": 6778 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "No soft pair looks like the same underlying bug/change closely enough to merge. Most pairs share only a broad area (imports, training, GLM, etc.) but describe different failures. I selected one highly specific, concrete issue as the canonical/best representative.", + "confidence": 0.87, + "canonical_issue_reason": "issue:43064 is the most concrete and self-contained bug report here: it names a precise training setup (v5 + FSDP2 + PEFT + cpu_ram_efficient_loading=True) and a specific failure mode (wrong optimizer states/params on non-zero ranks).", + "canonical_pr_reason": null, + "best_issue_reason": "issue:43064 is the strongest issue candidate because it is specific, reproducible, and describes a clearly bounded code-path problem.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:38617", + "right": "issue:45278", + "accept": false, + "reason": "Both are import-related, but one is a specific missing symbol and the other is a broad post-upgrade import breakage; not the same underlying bug." + }, + { + "left": "issue:43784", + "right": "issue:44933", + "accept": false, + "reason": "Both mention import errors, but they concern different modules and different failures; too broad to be the same bug." + }, + { + "left": "issue:43064", + "right": "issue:43673", + "accept": false, + "reason": "Training optimizer-state corruption under FSDP2/PEFT is unrelated to chunked-prefill GenerationMixin cache handling." + }, + { + "left": "issue:42673", + "right": "issue:45072", + "accept": false, + "reason": "VRAM leakage in multi-threaded dataloaders and bfloat16 dtype mismatches are different runtime problems in different code paths." 
+ }, + { + "left": "issue:43937", + "right": "issue:44485", + "accept": false, + "reason": "Same model family, but one is a GenerationConfig validation issue and the other is a RoPE implementation discussion; not the same bug." + }, + { + "left": "issue:44075", + "right": "issue:44936", + "accept": false, + "reason": "Optimizer argument handling and trainer evaluate-after-train failure are distinct behaviors with no clear shared code defect." + }, + { + "left": "issue:43116", + "right": "issue:43493", + "accept": false, + "reason": "A multi-label example script bug and a SigLIP2 implementation discrepancy are unrelated issues." + }, + { + "left": "issue:42371", + "right": "issue:43874", + "accept": false, + "reason": "TF32 API migration guidance is unrelated to a missing image processor method causing multimodal token counting failures." + }, + { + "left": "issue:22355", + "right": "issue:29127", + "accept": false, + "reason": "An ONNX import failure and a LayoutLMv3 error-message clarity issue are not the same underlying bug." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 109, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:33453", + "issue:38617", + "issue:39401", + "issue:41628", + "issue:42673", + "issue:43116", + "issue:43421", + "issue:43450", + "issue:43825", + "issue:43906", + "issue:43931", + "issue:43937", + "issue:43986", + "issue:44484", + "issue:44779", + "issue:44945", + "issue:44977", + "issue:45310" + ], + "soft_pairs": [ + "issue:43116|issue:43825", + "issue:39401|issue:44779", + "issue:33453|issue:39401", + "issue:43931|issue:44945", + "issue:43937|issue:44977", + "issue:44484|issue:45310", + "issue:42673|issue:43906", + "issue:39401|issue:43421", + "issue:43450|issue:43986", + "issue:38617|issue:41628" + ], + "prepared_review_unit_hash": "939992e3ebbb6de639a95c8b1f4598e0bb95038ac77fd99decfea0bceef4d02b", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13273, + "estimated_input_tokens": 3319, + "estimated_eval_tokens": 6894 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly heterogeneous: tokenizer regressions, model-loading import errors, generation/config issues, and processor bugs. 
The soft pairs are only superficially similar and do not look like the same underlying defect, so no merges are justified.", + "confidence": 0.84, + "canonical_issue_reason": "Issue 39401 is the clearest concrete bug report in the cluster, but only as a representative of the tokenizer-regression subset; the overall cluster is too mixed to treat as one duplicate set.", + "canonical_pr_reason": null, + "best_issue_reason": "39401 has a specific, reproducible symptom (wrong offset_mapping) and a narrow scope, making it the strongest standalone issue in this set.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43116", + "right": "issue:43825", + "accept": false, + "reason": "Different problems: example script returns empty multi-label outputs vs a pipeline() v5 warning/message issue. Same broad area, not the same bug." + }, + { + "left": "issue:39401", + "right": "issue:44779", + "accept": false, + "reason": "Both are tokenizer regressions, but they affect different models and symptoms (offset_mapping vs incorrect results). Not enough evidence of one underlying defect." + }, + { + "left": "issue:33453", + "right": "issue:39401", + "accept": false, + "reason": "Tokenizer loading regression and Qwen3 offset_mapping bug are distinct failure modes." + }, + { + "left": "issue:43931", + "right": "issue:44945", + "accept": false, + "reason": "Model weight-shape mismatch on load is unrelated to incorrect output under pipeline parallelism." + }, + { + "left": "issue:43937", + "right": "issue:44977", + "accept": false, + "reason": "GenerationConfig validation error and flash-attention generation failure are different code paths and symptoms." + }, + { + "left": "issue:44484", + "right": "issue:45310", + "accept": false, + "reason": "A save_pretrained shard-size question is unrelated to a Qwen3.5 from_pretrained loading error." 
+ }, + { + "left": "issue:42673", + "right": "issue:43906", + "accept": false, + "reason": "VRAM leak in multi-thread dataloading is not shown to be the same as the isolated reproduction of issue 38071." + }, + { + "left": "issue:39401", + "right": "issue:43421", + "accept": false, + "reason": "Both touch tokenizer behavior, but one is a Qwen3 offset_mapping regression and the other is runtime special-token/post-processor updates. Different defects." + }, + { + "left": "issue:43450", + "right": "issue:43986", + "accept": false, + "reason": "Incorrect batched video shape and a crash loading video models without torchvision are separate issues." + }, + { + "left": "issue:38617", + "right": "issue:41628", + "accept": false, + "reason": "Both are import errors, but they concern different missing symbols and likely different regressions." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 110, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:38617", + "issue:41628", + "issue:42175", + "issue:42222", + "issue:42673", + "issue:43577", + "issue:43582", + "issue:43784", + "issue:44117", + "issue:44220", + "issue:44291", + "issue:44410", + "issue:44829", + "issue:45071", + "issue:45081", + "issue:45216", + "issue:45310" + ], + "soft_pairs": [ + "issue:44291|issue:45071", + "issue:44117|issue:45081", + "issue:42673|issue:43577", + "issue:44829|issue:45216", + "issue:44410|issue:45310", + "issue:42673|issue:43582", + "issue:38617|issue:44220", + "issue:42175|issue:42222", + "issue:41628|issue:43784" + ], + "prepared_review_unit_hash": 
"512077f1cd29d809d0c3360f1e72e6b0e873c15ab5eb2e34192d387bb92e084f", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12743, + "estimated_input_tokens": 3186, + "estimated_eval_tokens": 6628 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is heterogeneous: the linked issues cover unrelated import errors, model-loading regressions, tokenizer bugs, backend/package installation problems, and model-specific architecture issues. None of the soft pairs look like the same underlying bug, so no soft duplicates should be merged.", + "confidence": 0.91, + "canonical_issue_reason": "issue:45071 is the broadest and most central regression-style report in the set (core PretrainedConfig type checking in v5.4.0), but the overall cluster is still too heterogeneous to treat as a true duplicate group.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:45071 is the strongest representative if one issue must anchor the cluster: it targets a core regression with broad impact and clear version scope.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44291", + "right": "issue:45071", + "accept": false, + "reason": "Both are v5.4.0-era loading regressions, but one is about init_empty_weights and an unexpected argument while the other is about PretrainedConfig type checking; different code paths and fixes." + }, + { + "left": "issue:44117", + "right": "issue:45081", + "accept": false, + "reason": "Tokenizer mapping None handling and a Mistral regex patch crash are distinct bugs in different tokenizer paths." 
+ }, + { + "left": "issue:42673", + "right": "issue:43577", + "accept": false, + "reason": "Different models and failure modes: VRAM leak in Qwen3ForCausalLM threading vs dtype staying float32 in BLIP-2 loading." + }, + { + "left": "issue:44829", + "right": "issue:45216", + "accept": false, + "reason": "FlashAttention-3 degenerate training and Qwen3.5 checkpoint save corruption are unrelated behavioral regressions." + }, + { + "left": "issue:44410", + "right": "issue:45310", + "accept": false, + "reason": "Both concern Qwen3-family models, but one is missing projections in qwen3next layers while the other is a from_pretrained error for Qwen3.5 MoE; not the same bug." + }, + { + "left": "issue:42673", + "right": "issue:43582", + "accept": false, + "reason": "A multithreaded Qwen3 VRAM leak is unrelated to an Apple Silicon TypeError in caching_allocator_warmup." + }, + { + "left": "issue:38617", + "right": "issue:44220", + "accept": false, + "reason": "ImportError for layer_type_validation and a torch fbank extraction issue are different subsystems and symptoms." + }, + { + "left": "issue:42175", + "right": "issue:42222", + "accept": false, + "reason": "Package extras/backend inclusion is unrelated to VitPose model breakage." + }, + { + "left": "issue:41628", + "right": "issue:43784", + "accept": false, + "reason": "Both are import-time failures, but they affect different symbols and packages and point to different breakages." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 111, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:42371", + "issue:42617", + "issue:43066", + "issue:43097", + "issue:43122", + "issue:43408", + "issue:43673", + "issue:43742", + "issue:43874", + "issue:43937", + "issue:44261", + "issue:44493", + "issue:44496", + "issue:44933", + "issue:44964", + "issue:45278", + "issue:45341" + ], + "soft_pairs": [ + "issue:42371|issue:44261", + "issue:44933|issue:45278", + "issue:43937|issue:44496", + "issue:43408|issue:43742", + "issue:42617|issue:45341", + "issue:43066|issue:43122", + "issue:44964|issue:45278", + "issue:43673|issue:45341", + "issue:43097|issue:44493", + "issue:43874|issue:44261" + ], + "prepared_review_unit_hash": "969e74aaf1b8eb746a639c4e7784f6a86c244f0862e2b26d647e7f3e5b6336d2", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 10, + "serialized_chars": 12659, + "estimated_input_tokens": 3165, + "estimated_eval_tokens": 6586 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The items are mostly a heterogeneous mix of Transformers v5 regressions (imports, tokenizer changes, model loading, multimodal/config errors) rather than one duplicate bug cluster. 
No soft edge pair looks like the same concrete underlying issue.", + "confidence": 0.56, + "canonical_issue_reason": "Issue 45278 is the broadest open report and best fits the general upgrade/regression theme, but the cluster is too mixed to treat it as a true duplicate hub.", + "canonical_pr_reason": null, + "best_issue_reason": "45278 is the best representative solely because it is broad, open, and phrased as a general post-upgrade failure rather than a narrow model-specific symptom.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:42371", + "right": "issue:44261", + "accept": false, + "reason": "TF32 settings/API guidance is unrelated to MLA q_a_layernorm missing rms_norm_eps and the precision mismatch it causes." + }, + { + "left": "issue:44933", + "right": "issue:45278", + "accept": false, + "reason": "Both mention imports, but 44933 is a specific missing symbol while 45278 is a broad upgrade-related import-error report; not the same bug." + }, + { + "left": "issue:43937", + "right": "issue:44496", + "accept": false, + "reason": "GenerationConfig validation for GLM-5 and an unrecognized-model/config.json model_type error are different loading failures with different root causes." + }, + { + "left": "issue:43408", + "right": "issue:43742", + "accept": false, + "reason": "A model-type warning for sam3_video vs sam3_tracker is unrelated to the MobileLLM key error; different models and error paths." + }, + { + "left": "issue:42617", + "right": "issue:45341", + "accept": false, + "reason": "Running 3d_parallel.py and a bug in testing_utils.py are unrelated to the same underlying code-path." + }, + { + "left": "issue:43066", + "right": "issue:43122", + "accept": false, + "reason": "Both are tokenizer-related regressions, but one is a decoder-type mismatch and the other is changed tokenization output; not clearly the same concrete defect." 
+ }, + { + "left": "issue:44964", + "right": "issue:45278", + "accept": false, + "reason": "Phi-4 multimodal load failure is a model-specific loading issue, not the same as the general import-error regression in 45278." + }, + { + "left": "issue:43673", + "right": "issue:45341", + "accept": false, + "reason": "Chunked-prefill cache missing in GenerationMixin and a testing_utils bug are unrelated." + }, + { + "left": "issue:43097", + "right": "issue:44493", + "accept": false, + "reason": "Removal of tie_embeddings_and_encoder_decoder and unexpected position-id keys are different API/config regressions." + }, + { + "left": "issue:43874", + "right": "issue:44261", + "accept": false, + "reason": "Missing get_number_of_image_patches in an image processor and absent rms_norm_eps in MLA q_a_layernorm are different multimodal/config issues." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 112, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:41628", + "issue:42222", + "issue:42994", + "issue:43012", + "issue:43097", + "issue:43408", + "issue:43441", + "issue:43452", + "issue:43784", + "issue:43976", + "issue:43986", + "issue:44263", + "issue:44484", + "issue:44485", + "issue:44829", + "issue:45003", + "issue:45071", + "issue:45341" + ], + "soft_pairs": [ + "issue:43986|issue:44484", + "issue:43441|issue:44829", + "issue:42222|issue:42994", + "issue:43012|issue:43408", + "issue:44263|issue:44485", + "issue:41628|issue:45071", + "issue:45003|issue:45341", + "issue:43784|issue:43976", + "issue:43097|issue:43452" + ], + "prepared_review_unit_hash": 
"b7cf783bce4066135239e7e7676135f6a39a7b2b936b3952fb6c4d2abca9d8ab", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 9, + "serialized_chars": 12979, + "estimated_input_tokens": 3245, + "estimated_eval_tokens": 6746 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is a loose mix of unrelated Transformers issues with only superficial title similarity. None of the soft pairs look like the same underlying bug or change, so all suggested merges should be rejected.", + "confidence": 0.93, + "canonical_issue_reason": "issue:45003 is the strongest representative because it is an open, core-runtime bug in modeling_utils with broader impact than the others, and it has meaningful discussion activity.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:45003 is the most suitable single representative of the cluster because it targets a foundational code path and is actionable as an unresolved bug.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43986", + "right": "issue:44484", + "accept": false, + "reason": "Different bugs: one is an AutoProcessor/video-model dependency crash, the other is a save_pretrained shard-size question." + }, + { + "left": "issue:43441", + "right": "issue:44829", + "accept": false, + "reason": "Both mention FlashAttention, but one is a specific Ministral-3 failure and the other is a training degeneration issue for sequence classification; not the same concrete bug." + }, + { + "left": "issue:42222", + "right": "issue:42994", + "accept": false, + "reason": "Unrelated: vitpose model loading/regression versus quantized model saving failure." 
+ }, + { + "left": "issue:43012", + "right": "issue:43408", + "accept": false, + "reason": "Both are warning reports, but they concern different model/type combinations and different code paths." + }, + { + "left": "issue:44263", + "right": "issue:44485", + "accept": false, + "reason": "Different topics: a torch.split return-value issue in one model component versus a RoPE implementation discussion." + }, + { + "left": "issue:41628", + "right": "issue:45071", + "accept": false, + "reason": "Import error for AutoImageProcessor is unrelated to PretrainedConfig type-checking breakage." + }, + { + "left": "issue:45003", + "right": "issue:45341", + "accept": false, + "reason": "Both are utility-level bugs, but one is unsafe sys.modules access in modeling_utils and the other is a testing_utils bug; not the same defect." + }, + { + "left": "issue:43784", + "right": "issue:43976", + "accept": false, + "reason": "A sentence-transformers import NameError is not the same as a Python-version compatibility regression." + }, + { + "left": "issue:43097", + "right": "issue:43452", + "accept": false, + "reason": "Removing tie_embeddings_and_encoder_decoder and gguf_file.from_pretrained breakage are separate regressions with different failure modes." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 113, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:42222", + "issue:43335", + "issue:43408", + "issue:43874", + "issue:43931", + "issue:43986", + "issue:44038", + "issue:44117", + "issue:44263", + "issue:44368", + "issue:44410", + "issue:44496", + "issue:44829", + "issue:44987", + "issue:45071", + "issue:45245", + "issue:45341", + "issue:45372" + ], + "soft_pairs": [ + "issue:42222|issue:44496", + "issue:44117|issue:44987", + "issue:43931|issue:44410", + "issue:43986|issue:45341", + "issue:43874|issue:44263", + "issue:43408|issue:44368", + "issue:43335|issue:45071", + "issue:43986|issue:45372", + "issue:43931|issue:44038", + "issue:44829|issue:45245" + ], + "prepared_review_unit_hash": "086608f88dee883237a21defa30abfb9cc955dd8e1238674046e4449a4555f6a", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13589, + "estimated_input_tokens": 3398, + "estimated_eval_tokens": 7052 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is heterogeneous: the items are separate bug reports across different models, configs, and utilities, with only superficial similarity in a few titles. 
I would not merge any of the soft edges.", + "confidence": 0.94, + "canonical_issue_reason": "No single canonical issue fits this set; the reports span unrelated bugs rather than one duplicate problem.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:43986 is the strongest standalone report to anchor triage because it has a clear reproducible crash path and multiple inbound references, but it should not absorb the other issues.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:42222", + "right": "issue:44496", + "accept": false, + "reason": "Different model families and failures: vitpose is reported broken, while Olmo-Hybrid-Instruct-SFT-7B is an unrecognized-model/config.json problem." + }, + { + "left": "issue:44117", + "right": "issue:44987", + "accept": false, + "reason": "Both mention loading behavior, but one is a tokenizer mapping None bug and the other is a model-loading failure for a specific repo; not the same code path." + }, + { + "left": "issue:43931", + "right": "issue:44410", + "accept": false, + "reason": "Both are Qwen-related, but one is a weight-shape mismatch during loading and the other is missing projections in layer 0 of qwen3next; distinct bugs." + }, + { + "left": "issue:43986", + "right": "issue:45341", + "accept": false, + "reason": "The first is a video-model AutoProcessor/torchvision dependency crash; the second is a testing_utils bug. No common underlying defect." + }, + { + "left": "issue:43874", + "right": "issue:44263", + "accept": false, + "reason": "Both touch GLM/MoE areas, but one is a missing processor method causing an AttributeError and the other is torch.split return-value handling in a different component." + }, + { + "left": "issue:43408", + "right": "issue:44368", + "accept": false, + "reason": "These are unrelated warnings: sam3 tracker/video model mismatch versus tie_word_embeddings config warning for Qwen3.5 fine-tuning." 
+ }, + { + "left": "issue:43335", + "right": "issue:45071", + "accept": false, + "reason": "SwitchTransformers sparse-layer construction and PretrainedConfig type checking are separate problems in different subsystems." + }, + { + "left": "issue:43986", + "right": "issue:45372", + "accept": false, + "reason": "One is missing torchvision for video processor loading; the other is an import error for ReasoningEffort affecting Gemma 4 processor loading. Different dependencies and failure modes." + }, + { + "left": "issue:43931", + "right": "issue:44038", + "accept": false, + "reason": "Both are Qwen3-VL variants, but one is a shape mismatch on load and the other concerns transformers 5.0 behavior with Qwen3-VL-Moe; not mergeable as the same bug." + }, + { + "left": "issue:44829", + "right": "issue:45245", + "accept": false, + "reason": "Flash-attention training degeneration and the 2^24 categories runtime limit are unrelated issues despite both being runtime failures." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 114, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:38617", + "issue:43064", + "issue:43531", + "issue:43976", + "issue:44062", + "issue:44117", + "issue:44261", + "issue:44315", + "issue:44351", + "issue:44410", + "issue:44464", + "issue:44485", + "issue:44704", + "issue:45071", + "issue:45103", + "issue:45341", + "issue:45375" + ], + "soft_pairs": [ + "issue:43976|issue:45071", + "issue:43064|issue:44315", + "issue:44464|issue:45071", + "issue:43531|issue:45341", + "issue:38617|issue:44351", + "issue:44261|issue:44485", + "issue:44704|issue:45103", + 
"issue:44410|issue:45375", + "issue:44062|issue:44117" + ], + "prepared_review_unit_hash": "48bbcf175243f65b6fb7b913d5aeb155672ea50aafb06b756fb45cc64b166b82", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 9, + "serialized_chars": 12523, + "estimated_input_tokens": 3131, + "estimated_eval_tokens": 6518 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is heterogeneous: the issues cover unrelated import errors, tokenizer/config regressions, model-specific bugs, training bugs, and docs/annotation tooling issues. None of the soft pairs look like true duplicates.", + "confidence": 0.96, + "canonical_issue_reason": "No single issue is a valid canonical for this cluster because the items describe different underlying bugs in different subsystems and model families.", + "canonical_pr_reason": null, + "best_issue_reason": "No issue is a strong global representative; the overlap in wording is superficial rather than a shared root cause.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43976", + "right": "issue:45071", + "accept": false, + "reason": "Both are version/regression reports, but one is a Python compatibility failure and the other is a PretrainedConfig type-checking break; different root causes and fixes." + }, + { + "left": "issue:43064", + "right": "issue:44315", + "accept": false, + "reason": "Completely different areas: optimizer state handling for FSDP2/PEFT versus Liger Kernel application with model_init." + }, + { + "left": "issue:44464", + "right": "issue:45071", + "accept": false, + "reason": "Chunked generation/compiled forward inconsistency is unrelated to PretrainedConfig typing." 
+ }, + { + "left": "issue:43531", + "right": "issue:45341", + "accept": false, + "reason": "Qwen3-MoE sliding_window behavior and a testing_utils bug are unrelated." + }, + { + "left": "issue:38617", + "right": "issue:44351", + "accept": false, + "reason": "Both are import errors, but they concern different missing symbols in different modules with no shared code path evident." + }, + { + "left": "issue:44261", + "right": "issue:44485", + "accept": false, + "reason": "Different model-specific implementation issues: MLA q_a_layernorm epsilon/config handling versus GLM-5 RoPE logic." + }, + { + "left": "issue:44704", + "right": "issue:45103", + "accept": false, + "reason": "AutoProcessor cached_file kwargs propagation is unrelated to an auto_docstring AttributeError from postponed annotations." + }, + { + "left": "issue:44410", + "right": "issue:45375", + "accept": false, + "reason": "Both mention missing fields, but one is a Qwen3-next architecture projection bug and the other is a config field being dropped by strict typing; different concrete failures." + }, + { + "left": "issue:44062", + "right": "issue:44117", + "accept": false, + "reason": "Tokenizer AddedToken keyword duplication and TOKENIZER_MAPPING_NAMES returning None are different tokenizer initialization bugs." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 115, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:38617", + "issue:43066", + "issue:43097", + "issue:43502", + "issue:43582", + "issue:43824", + "issue:43873", + "issue:43906", + "issue:43957", + "issue:43986", + "issue:43992", + "issue:44038", + "issue:44117", + "issue:44351", + "issue:44410", + "issue:44484", + "issue:45341" + ], + "soft_pairs": [ + "issue:43502|issue:43992", + "issue:43986|issue:44117", + "issue:38617|issue:43097", + "issue:44038|issue:44410", + "issue:43582|issue:45341", + "issue:43066|issue:43906", + "issue:43957|issue:44351", + "issue:43824|issue:43986", + "issue:43992|issue:44484", + "issue:43097|issue:43873" + ], + "prepared_review_unit_hash": "49e633aa9d2467f218b63c178e72d7b8e80b9db160e7a5af4970099f74199975", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 10, + "serialized_chars": 12702, + "estimated_input_tokens": 3176, + "estimated_eval_tokens": 6608 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "Mostly unrelated Transformers bug reports. 
The soft pairs share broad version/loading/theme similarity, but not the same underlying defect or fix path, so none should be merged as duplicates.", + "confidence": 0.94, + "canonical_issue_reason": "No single canonical issue stands out; the items cover distinct bugs and regressions across imports, loading, quantization/offloading, and model-specific breakages.", + "canonical_pr_reason": null, + "best_issue_reason": "43502 is the clearest standalone regression report: the symptom is specific ('local_files_only=True' still triggers API calls), the impact is broad, and it is the most actionable candidate for a duplicate anchor if similar reports appear.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43502", + "right": "issue:43992", + "accept": false, + "reason": "Different failures: network calls despite local_files_only vs a missing embed_tokens.weight during UMT5Encoder loading." + }, + { + "left": "issue:43986", + "right": "issue:44117", + "accept": false, + "reason": "One is an AutoProcessor crash without torchvision; the other is a tokenizer mapping None-handling bug. Related loading area, but not the same defect." + }, + { + "left": "issue:38617", + "right": "issue:43097", + "accept": false, + "reason": "Both are v5 breakage reports, but they concern different removed symbols/APIs and different import paths." + }, + { + "left": "issue:44038", + "right": "issue:44410", + "accept": false, + "reason": "Different model families and symptoms; Qwen3-VL-Moe v5 breakage is not the same as missing qkv/gate projections in qwen3next layer 0." + }, + { + "left": "issue:43582", + "right": "issue:45341", + "accept": false, + "reason": "No meaningful overlap beyond being bug reports; Apple Silicon TypeError in caching_allocator_warmup is unrelated to a testing_utils issue." 
+ }, + { + "left": "issue:43066", + "right": "issue:43906", + "accept": false, + "reason": "43906 is an isolated reproduction of another upstream issue, not the same as the 'wrong tokenizer decoder type' report." + }, + { + "left": "issue:43957", + "right": "issue:44351", + "accept": false, + "reason": "Meta-device loading failures and a HybridCache import error are separate code paths with different symptoms and likely fixes." + }, + { + "left": "issue:43824", + "right": "issue:43986", + "accept": false, + "reason": "Missing Qwen2_5_VLForConditionalGeneration export is a model registration/import issue, while the other is a processor crash due to missing torchvision." + }, + { + "left": "issue:43992", + "right": "issue:44484", + "accept": false, + "reason": "A missing embed_tokens.weight during load is unrelated to the max_shard_size default question in save_pretrained()." + }, + { + "left": "issue:43097", + "right": "issue:43873", + "accept": false, + "reason": "Removed tie_embeddings_and_encoder_decoder and quantization/offloading behavior are different problems in different parts of the stack." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 116, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:42886", + "issue:42907", + "issue:43012", + "issue:43097", + "issue:43299", + "issue:43352", + "issue:43381", + "issue:43644", + "issue:43825", + "issue:43901", + "issue:43937", + "issue:44368", + "issue:44380", + "issue:44410", + "issue:44484", + "issue:44655", + "issue:44792", + "issue:44829" + ], + "soft_pairs": [ + "issue:43012|issue:44829", + "issue:43299|issue:44410", + "issue:43352|issue:44380", + "issue:43644|issue:44484", + "issue:43825|issue:44655", + "issue:43097|issue:43901", + "issue:43012|issue:44368", + "issue:42886|issue:43381", + "issue:43937|issue:44792", + "issue:42907|issue:44655" + ], + "prepared_review_unit_hash": "6830af64cbe7d56c8ea0aaf7941173b7a7a861fe09752291efa08367006f37ac", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13406, + "estimated_input_tokens": 3352, + "estimated_eval_tokens": 6960 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These items are a heterogeneous collection of unrelated Transformers issues spanning cache/offline loading, save_pretrained/pipeline serialization, model-specific loading regressions, attention backend behavior, documentation mismatches, and assorted 
warning/reporting changes. None of the suggested soft pairs look like the same underlying bug or change.", + "confidence": 0.95, + "canonical_issue_reason": "No single canonical issue fits this cluster; the items are not duplicates of one another and cover distinct subsystems and failure modes.", + "canonical_pr_reason": null, + "best_issue_reason": "No best issue candidate: the cluster is not a duplicate set, so selecting one issue as canonical would be misleading.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43012", + "right": "issue:44829", + "accept": false, + "reason": "Both involve training/runtime warnings, but one is a PyTorch precision warning for bfloat16 compilation while the other is a degenerate-training regression with flash_attention_3; different symptoms and code paths." + }, + { + "left": "issue:43299", + "right": "issue:44410", + "accept": false, + "reason": "Both mention Qwen models, but one is a Qwen3VL MoE loading failure and the other is missing attention projections in qwen3next layer 0; different model families and defects." + }, + { + "left": "issue:43352", + "right": "issue:44380", + "accept": false, + "reason": "Unsupported Flash Attention 2 for a specific model is not the same problem as GPT2 attention scaling being ignored under SDPA/FlashAttention; backend and failure mode differ." + }, + { + "left": "issue:43644", + "right": "issue:44484", + "accept": false, + "reason": "One reports junk in non-persistent buffers during Transformers 5.0.0 loading, the other asks about the default max_shard_size in save_pretrained; unrelated serialization topics." + }, + { + "left": "issue:43825", + "right": "issue:44655", + "accept": false, + "reason": "The first is a bad pipeline() error message about translation tasks in v5, while the second is inability to save Pipeline objects with save_pretrained; same broad area, but different bugs." 
+ }, + { + "left": "issue:43097", + "right": "issue:43901", + "accept": false, + "reason": "One concerns a removed config field in 5.0.0, the other a docs/behavior mismatch for return_all_scores in text classification; distinct API/documentation issues." + }, + { + "left": "issue:43012", + "right": "issue:44368", + "accept": false, + "reason": "Both are warning-related, but one is a float32 precision warning when compiling a bfloat16 model and the other is a tie_word_embeddings warning during LoRA fine-tuning; not the same bug." + }, + { + "left": "issue:42886", + "right": "issue:43381", + "accept": false, + "reason": "Tokenizer offline cache loading and gradient checkpointing in eval mode are unrelated issues affecting different parts of the stack." + }, + { + "left": "issue:43937", + "right": "issue:44792", + "accept": false, + "reason": "A GLM-5 GenerationConfig validation failure and a Janus image-generation test failure involve different models and different failure conditions." + }, + { + "left": "issue:42907", + "right": "issue:44655", + "accept": false, + "reason": "Both mention saving, but one is about saving dequantized Ministrals/Devstrals and the other about saving Pipeline objects; different object types and likely different fixes." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 117, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:36683", + "issue:43012", + "issue:43064", + "issue:43208", + "issue:43352", + "issue:43525", + "issue:43526", + "issue:43550", + "issue:43582", + "issue:43618", + "issue:43761", + "issue:43976", + "issue:44117", + "issue:44380", + "issue:44483", + "issue:44514", + "issue:44683", + "issue:45290" + ], + "soft_pairs": [ + "issue:43525|issue:44117", + "issue:43012|issue:43550", + "issue:43352|issue:43976", + "issue:43208|issue:43526", + "issue:43064|issue:44380", + "issue:43618|issue:44380", + "issue:44483|issue:44514", + "issue:36683|issue:43582", + "issue:43618|issue:43761", + "issue:43012|issue:44683", + "issue:44483|issue:45290" + ], + "prepared_review_unit_hash": "24ae86765c4afbb99afcb0fc051fcef4f763c20c3c7a7b7fa3750eafe343d910", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 11, + "serialized_chars": 13746, + "estimated_input_tokens": 3437, + "estimated_eval_tokens": 7130 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The set is mostly unrelated. 
The only plausible duplicate pair is the CLIP vision regression around missing optional outputs; all other soft pairs describe different code paths or symptoms.", + "confidence": 0.87, + "canonical_issue_reason": "No broad canonical across the full set, but issue 43761 is the strongest representative of the CLIP regression cluster because it clearly states the v5-forward regression and affected output path.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 43761 is the clearest triage target in this batch: it has a concrete regression claim, an explicit repro condition, and aligns with the only accepted near-duplicate.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43525", + "right": "issue:44117", + "accept": false, + "reason": "Different bugs: one is a missing config attribute in Llama4Config, the other is tokenizer mapping unexpectedly returning None in from_pretrained." + }, + { + "left": "issue:43012", + "right": "issue:43550", + "accept": false, + "reason": "Both mention compilation, but one is a bfloat16 precision warning while the other is a concrete torch.compile + SDPA failure for Bamba; not the same underlying bug." + }, + { + "left": "issue:43352", + "right": "issue:43976", + "accept": false, + "reason": "Unrelated failures: Flash Attention support for Nemotron vs a Python compatibility issue in Transformers 5.1.0." + }, + { + "left": "issue:43208", + "right": "issue:43526", + "accept": false, + "reason": "Completely different subsystems and symptoms: xLSTM training bugs versus a BeitImageProcessorFast label reduction bug." + }, + { + "left": "issue:43064", + "right": "issue:44380", + "accept": false, + "reason": "FSDP2/PEFT optimizer-state corruption is unrelated to GPT2 attention scaling being ignored under SDPA/FlashAttention." 
+ }, + { + "left": "issue:43618", + "right": "issue:44380", + "accept": false, + "reason": "CLIP output assignment regression versus GPT2 attention scaling; different models and different failure modes." + }, + { + "left": "issue:44483", + "right": "issue:44514", + "accept": false, + "reason": "One is an OpenAI-style chat completions endpoint compatibility issue; the other is a Qwen2_5_VL chat template crash on batched tool-call input." + }, + { + "left": "issue:36683", + "right": "issue:43582", + "accept": false, + "reason": "No overlap: Gemma3Config missing vocab_size versus an Apple Silicon TypeError in caching_allocator_warmup." + }, + { + "left": "issue:43618", + "right": "issue:43761", + "accept": true, + "reason": "Both describe the same CLIP v5 regression where optional vision outputs are no longer populated in the model output path." + }, + { + "left": "issue:43012", + "right": "issue:44683", + "accept": false, + "reason": "Different compilation problems: a float32 precision warning for bfloat16 models versus a flex_attention failure on torch >= 2.9." + }, + { + "left": "issue:44483", + "right": "issue:45290", + "accept": false, + "reason": "Both involve chat APIs/templates, but one is an endpoint request rejection and the other is a tokenizer/template crash on assistant tool-call messages." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 118, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:36683", + "issue:38617", + "issue:42994", + "issue:43065", + "issue:43299", + "issue:43475", + "issue:43502", + "issue:43756", + "issue:43867", + "issue:43901", + "issue:44038", + "issue:44230", + "issue:44483", + "issue:44485", + "issue:44655", + "issue:45216", + "issue:45381" + ], + "soft_pairs": [ + "issue:43756|issue:44485", + "issue:38617|issue:43502", + "issue:44483|issue:45381", + "issue:43299|issue:43867", + "issue:44038|issue:44230", + "issue:43901|issue:44655", + "issue:42994|issue:44655", + "issue:36683|issue:43475", + "issue:43065|issue:44483", + "issue:43867|issue:45216" + ], + "prepared_review_unit_hash": "7c8586ca1afcd0598e69a5aaa5fafd7f7c32b18e76b896ceb6659fd72b0436ab", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 10, + "serialized_chars": 12651, + "estimated_input_tokens": 3163, + "estimated_eval_tokens": 6582 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "Mostly heterogeneous issues with only superficial overlap around model loading/saving and RoPE/model-specific regressions; none of the soft pairs look like the same underlying bug.", + "confidence": 0.94, + "canonical_issue_reason": "issue:43502 is the clearest broadly scoped bug 
report in the set, with the strongest discussion signal and a concrete, actionable failure mode.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:43502 is the most representative and maintainable issue to anchor the cluster around: specific symptom, general utility, and the highest engagement among the candidates.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43756", + "right": "issue:44485", + "accept": false, + "reason": "Both mention RoPE, but they concern different models and different failure modes (Smollm3 layer dropping vs GLM-5 RoPE implementation)." + }, + { + "left": "issue:38617", + "right": "issue:43502", + "accept": false, + "reason": "ImportError from configuration_utils and unwanted network requests with local_files_only=True are unrelated code paths and symptoms." + }, + { + "left": "issue:44483", + "right": "issue:45381", + "accept": false, + "reason": "One is a chat-completions request validation problem; the other is a Qwen2.5-VL video position_ids bug. Different API surface and model logic." + }, + { + "left": "issue:43299", + "right": "issue:43867", + "accept": false, + "reason": "Different loading bugs: Qwen3VL-Moe regression on v5.0.0.dev0 versus a state_dict ordering issue. Same broad area, not the same defect." + }, + { + "left": "issue:44038", + "right": "issue:44230", + "accept": false, + "reason": "One reports broken loading on transformers 5.0 for Qwen3-VL-Moe; the other is a request for fp8 inference support. Not the same change or bug." + }, + { + "left": "issue:43901", + "right": "issue:44655", + "accept": false, + "reason": "Docs mismatch for return_all_scores is unrelated to save_pretrained failing on Pipeline objects." + }, + { + "left": "issue:42994", + "right": "issue:44655", + "accept": false, + "reason": "Both involve saving, but one is quantized-model saving and the other is Pipeline.save_pretrained. Different underlying problems." 
+ }, + { + "left": "issue:36683", + "right": "issue:43475", + "accept": false, + "reason": "Both are missing-attribute errors, but they affect different model classes and different attributes, so they are not the same bug." + }, + { + "left": "issue:43065", + "right": "issue:44483", + "accept": false, + "reason": "Sam3PixelDecoder dummy Conv2d discussion is unrelated to the /v1/chat/completions request acceptance bug." + }, + { + "left": "issue:43867", + "right": "issue:45216", + "accept": false, + "reason": "Both touch save/load behavior, but one is a generic state_dict sorting error and the other is a Qwen3.5 save_pretrained regression; the evidence is too weak to treat as one defect." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 119, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:36683", + "issue:43064", + "issue:43066", + "issue:43352", + "issue:43525", + "issue:43937", + "issue:43976", + "issue:43992", + "issue:44077", + "issue:44220", + "issue:44263", + "issue:44380", + "issue:44492", + "issue:44655", + "issue:44938", + "issue:45310", + "issue:45356", + "issue:45440" + ], + "soft_pairs": [ + "issue:43352|issue:43937", + "issue:44220|issue:44655", + "issue:44263|issue:45440", + "issue:43066|issue:45356", + "issue:36683|issue:44077", + "issue:43064|issue:44492", + "issue:43992|issue:45310", + "issue:44077|issue:44380", + "issue:36683|issue:43525", + "issue:43976|issue:44938" + ], + "prepared_review_unit_hash": "447cb23f6c7e2fabc93420e22ab5b7584e341a1f1bb3a8fe0cf5b554a6dc3037", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, 
+ "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13217, + "estimated_input_tokens": 3305, + "estimated_eval_tokens": 6866 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The cluster is a loose collection of unrelated Transformers issues spanning different models, tokenizers, training/runtime compatibility, and config/load-path bugs. The soft pairs share only broad theme similarity, not the same concrete defect, so none should be merged.", + "confidence": 0.96, + "canonical_issue_reason": "No single issue is a true canonical for this cluster because the items describe separate bugs across different model families and subsystems. If forced, issue 45440 is the broadest implementation-regression report, but it does not subsume the others.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 45440 is the most substantive and generally useful report in the set: it points to a native-vs-remote implementation divergence in DeepseekV3MoE, which is more representative of a real code-path defect than the narrower config/tokenizer/version-specific reports.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43352", + "right": "issue:43937", + "accept": false, + "reason": "Different models and failure modes: Flash Attention support gating for Nemotron vs invalid GenerationConfig for GLM-5. Same broad runtime category, but not the same bug." + }, + { + "left": "issue:44220", + "right": "issue:44655", + "accept": false, + "reason": "Unrelated code paths: `_torch_extract_fbank_features()` in audio preprocessing vs `save_pretrained` for Pipeline serialization." 
+ }, + { + "left": "issue:44263", + "right": "issue:45440", + "accept": false, + "reason": "Different model internals and defects: `torch.split` return handling in GlmMoeDsaIndexer vs DeepseekV3MoE diverging from the remote implementation." + }, + { + "left": "issue:43066", + "right": "issue:45356", + "accept": false, + "reason": "Both are tokenizer-related regressions, but they concern different concrete problems: wrong decoder type vs codec handling / warning behavior in Kimi-K2.5." + }, + { + "left": "issue:36683", + "right": "issue:44077", + "accept": false, + "reason": "A missing config attribute on Gemma3Config is not the same bug as patchtsmixer's `post_init` handling." + }, + { + "left": "issue:43064", + "right": "issue:44492", + "accept": false, + "reason": "Training/FSDP optimizer-state corruption is unrelated to a typo in cache strategy text." + }, + { + "left": "issue:43992", + "right": "issue:45310", + "accept": false, + "reason": "Different model-loading failures for different architectures: missing `embed_tokens.weight` in UMT5Encoder vs Qwen3.5 MoE `from_pretrained` error." + }, + { + "left": "issue:44077", + "right": "issue:44380", + "accept": false, + "reason": "Distinct issues in different subsystems: `post_init` policy for patchtsmixer vs GPT2 attention scaling being ignored under SDPA/FlashAttention." + }, + { + "left": "issue:36683", + "right": "issue:43525", + "accept": false, + "reason": "Both are config attribute errors, but for different models and missing fields (`vocab_size` vs `pad_token_id`), so they are not the same underlying defect." + }, + { + "left": "issue:43976", + "right": "issue:44938", + "accept": false, + "reason": "Separate Python-version compatibility regressions with different symptoms and likely different root causes." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 120, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:36683", + "issue:41669", + "issue:43010", + "issue:43064", + "issue:43352", + "issue:43441", + "issue:43618", + "issue:43867", + "issue:43931", + "issue:44188", + "issue:44242", + "issue:44246", + "issue:44380", + "issue:44938", + "issue:45071", + "issue:45216", + "issue:45278", + "issue:45375" + ], + "soft_pairs": [ + "issue:44188|issue:44380", + "issue:43618|issue:45216", + "issue:36683|issue:44242", + "issue:43010|issue:45278", + "issue:43441|issue:44380", + "issue:41669|issue:44246", + "issue:36683|issue:43352", + "issue:43352|issue:43931", + "issue:43867|issue:45375", + "issue:36683|issue:43064", + "issue:44938|issue:45071" + ], + "prepared_review_unit_hash": "2e69019716950490aa562679280606944f76093391a0902290374425dae1c95a", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 11, + "serialized_chars": 13604, + "estimated_input_tokens": 3401, + "estimated_eval_tokens": 7058 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The set is heterogeneous. 
The soft pairs mostly share only broad subsystem keywords (imports, loading, attention, configs) but each describes a different concrete failure mode, so none are safe duplicate merges.", + "confidence": 0.96, + "canonical_issue_reason": "No single canonical issue is appropriate: the candidate pairs are different bugs/regressions across unrelated code paths, so selecting one anchor would be arbitrary.", + "canonical_pr_reason": "No pull requests are present in this cluster, so there is no canonical PR.", + "best_issue_reason": "No issue is a strong global representative for deduping; at best the cluster loosely spans import/load/regression themes, but not one underlying defect.", + "best_pr_reason": "No pull requests are present in this cluster, so there is no best PR.", + "soft_edge_verdicts": [ + { + "left": "issue:44188", + "right": "issue:44380", + "accept": false, + "reason": "Both involve attention backends, but one is a torch.compile branching bug and the other is GPT2 attention scaling being ignored; different code paths and fixes." + }, + { + "left": "issue:43618", + "right": "issue:45216", + "accept": false, + "reason": "CLIP attentions missing from output and Qwen3.5 save_pretrained checkpoint corruption are unrelated model-output vs serialization regressions." + }, + { + "left": "issue:36683", + "right": "issue:44242", + "accept": false, + "reason": "Gemma3Config missing vocab_size and MoE load-balancing loss omission are unrelated config attribute vs loss-computation bugs." + }, + { + "left": "issue:43010", + "right": "issue:45278", + "accept": false, + "reason": "A no_grad decorator on cache/layer update is unrelated to broad import failures after upgrading versions." + }, + { + "left": "issue:43441", + "right": "issue:44380", + "accept": false, + "reason": "Both mention FlashAttention/backends, but Ministral-3 FA failure and GPT2 attention scaling being ignored are different model-specific bugs." 
+ }, + { + "left": "issue:41669", + "right": "issue:44246", + "accept": false, + "reason": "Both concern import performance, but one is import * usage in model modules and the other is intermittent slow import of transformers; not the same concrete defect." + }, + { + "left": "issue:36683", + "right": "issue:43352", + "accept": false, + "reason": "Gemma3Config missing vocab_size and NemotronHForCausalLM lacking Flash Attention 2.0 support are different model API issues." + }, + { + "left": "issue:43352", + "right": "issue:43931", + "accept": false, + "reason": "FlashAttention 2.0 unsupported for Nemotron vs Qwen3-VL model-loading shape mismatch are unrelated failures." + }, + { + "left": "issue:43867", + "right": "issue:45375", + "accept": false, + "reason": "Sorted state_dict load failure and strict-dropping of a missing config field are both loading-related, but they are different mechanisms and fixes." + }, + { + "left": "issue:36683", + "right": "issue:43064", + "accept": false, + "reason": "A missing config attribute and a Trainer FSDP2/PEFT optimizer-state rank bug are unrelated issues." + }, + { + "left": "issue:44938", + "right": "issue:45071", + "accept": false, + "reason": "Python 3.14 import failure and v5.4.0 PretrainedConfig type-checking regression are both compatibility issues, but not the same root cause." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 121, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:36296", + "issue:38617", + "issue:41669", + "issue:43010", + "issue:43575", + "issue:43618", + "issue:43644", + "issue:43867", + "issue:43976", + "issue:43992", + "issue:44077", + "issue:44360", + "issue:44484", + "issue:44554", + "issue:44938", + "issue:45357", + "issue:45381", + "issue:45440" + ], + "soft_pairs": [ + "issue:43867|issue:45357", + "issue:43010|issue:43976", + "issue:44360|issue:45440", + "issue:44938|issue:45381", + "issue:43618|issue:44077", + "issue:41669|issue:44554", + "issue:43644|issue:44938", + "issue:43867|issue:44484", + "issue:38617|issue:43992", + "issue:36296|issue:43575" + ], + "prepared_review_unit_hash": "23db93b11b3f54e3803dbf2c06f03d40cb568c93bb633dd03114c93795af5d11", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13172, + "estimated_input_tokens": 3293, + "estimated_eval_tokens": 6842 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These items are mostly unrelated Transformers bugs and regressions that only share broad subsystem language. 
None of the soft pairs look like the same underlying issue or a mergeable duplicate PR-equivalent change.", + "confidence": 0.88, + "canonical_issue_reason": "No single canonical issue is evident because the set spans distinct bugs; if one anchor is needed, issue 45440 is the strongest current report due to being open, specific, and already referenced.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 45440 is the best representative anchor only in the sense of having the clearest active signal and concrete model-divergence impact, but it does not subsume the other issues.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43867", + "right": "issue:45357", + "accept": false, + "reason": "Both concern loading/saving state, but one is about sorted state_dict load failures and the other about wrong visual encoder keys in save_pretrained; different failure modes and code paths." + }, + { + "left": "issue:43010", + "right": "issue:43976", + "accept": false, + "reason": "One is a no_grad decoration proposal for cache/layer updates; the other is a Python version compatibility/load failure. No shared underlying bug." + }, + { + "left": "issue:44360", + "right": "issue:45440", + "accept": false, + "reason": "DSA ReLU indexing discussion is unrelated to DeepseekV3MoE divergence from remote implementation; same broad area, different concrete problem." + }, + { + "left": "issue:44938", + "right": "issue:45381", + "accept": false, + "reason": "Python 3.14 import/load failure and qwen2.5-vl video vision_position_ids bug are separate issues with different symptoms and fixes." + }, + { + "left": "issue:43618", + "right": "issue:44077", + "accept": false, + "reason": "CLIPOutput attentions regression and patchtsmixer post_init allowance are unrelated model API bugs." 
+ }, + { + "left": "issue:41669", + "right": "issue:44554", + "accept": false, + "reason": "Import-star performance cleanup is not the same as the MPS attention correctness issue; one is refactoring/perf, the other is backend numerical correctness." + }, + { + "left": "issue:43644", + "right": "issue:44938", + "accept": false, + "reason": "Non-persistent buffer junk initialization and Python 3.14 loading failure are different root causes and fixes." + }, + { + "left": "issue:43867", + "right": "issue:44484", + "accept": false, + "reason": "State_dict load error when sorted and max_shard_size default discussion are not the same bug; one is loading behavior, the other is a save_pretrained size policy question." + }, + { + "left": "issue:38617", + "right": "issue:43992", + "accept": false, + "reason": "ImportError for layer_type_validation and missing embed_tokens.weight in UMT5Encoder.from_pretrained are separate loading regressions." + }, + { + "left": "issue:36296", + "right": "issue:43575", + "accept": false, + "reason": "Both mention tensor parallelism, but one is a generic training bug and the other is an OOM when loading a specific Qwen2 model with tp; not the same underlying defect." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 122, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:28282", + "issue:33453", + "issue:42548", + "issue:42890", + "issue:43010", + "issue:43232", + "issue:43502", + "issue:43618", + "issue:43723", + "issue:44062", + "issue:44075", + "issue:44112", + "issue:44351", + "issue:44556", + "issue:44857", + "issue:44964", + "issue:44977", + "issue:45341" + ], + "soft_pairs": [ + "issue:42890|issue:44964", + "issue:42890|issue:44112", + "issue:28282|issue:33453", + "issue:43618|issue:44857", + "issue:43618|issue:44977", + "issue:43502|issue:44062", + "issue:43010|issue:44075", + "issue:42890|issue:45341", + "issue:43723|issue:44556", + "issue:43010|issue:43232", + "issue:42548|issue:44351" + ], + "prepared_review_unit_hash": "5e2ac4d6ebb3a272fb2af15901968f12548b35bc442854930732f9477a1c2c2e", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 11, + "serialized_chars": 13510, + "estimated_input_tokens": 3378, + "estimated_eval_tokens": 7012 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly heterogeneous; only the tokenizer/checkpoint-loading v5 reports look plausibly duplicate. 
The rest are distinct import, CI, cache, generation, or model-specific bugs.", + "confidence": 0.54, + "canonical_issue_reason": "Issue 43723 is the broadest representative of the only clearly related theme here: tokenizer/model loading regressions in v5. It is more general than the narrower checkpoint-upgrade report in 44556.", + "canonical_pr_reason": null, + "best_issue_reason": "43723 best captures the shared loading-regression theme and could serve as the umbrella issue for the closest match in this set.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:42890", + "right": "issue:44964", + "accept": false, + "reason": "Both mention model-related failures, but one is a SAM HQ test flake and the other is a Phi-4 multimodal loading regression; different bugs and code paths." + }, + { + "left": "issue:42890", + "right": "issue:44112", + "accept": false, + "reason": "Both are CI/test failures, but they affect different tests and subsystems; this looks like generic flakiness rather than one underlying bug." + }, + { + "left": "issue:28282", + "right": "issue:33453", + "accept": false, + "reason": "One is an AutoModel/PyTorch import error and the other is a tokenizer-loading regression; too different to be the same bug." + }, + { + "left": "issue:43618", + "right": "issue:44857", + "accept": false, + "reason": "CLIPOutput attentions missing and an AMP/CUDA loss crash are unrelated model behaviors." + }, + { + "left": "issue:43618", + "right": "issue:44977", + "accept": false, + "reason": "Missing CLIP attentions and Qwen3.5 flash-attention generation problems are different failures in different models/code paths." + }, + { + "left": "issue:43502", + "right": "issue:44062", + "accept": false, + "reason": "Local-files-only network leakage and AddedToken argument duplication are unrelated loading issues." 
+ }, + { + "left": "issue:43010", + "right": "issue:44075", + "accept": false, + "reason": "Cache/layer no_grad decoration and SGD optimizer args not being used are separate implementation bugs." + }, + { + "left": "issue:42890", + "right": "issue:45341", + "accept": false, + "reason": "A specific SAM HQ test flake and a general testing_utils bug are not the same underlying issue." + }, + { + "left": "issue:43723", + "right": "issue:44556", + "accept": true, + "reason": "Both describe the same v5 loading regression family: tokenizer/model checkpoint reload failures after upgrading to v5, likely sharing the same loading code path." + }, + { + "left": "issue:43010", + "right": "issue:43232", + "accept": false, + "reason": "These are both generation/caching related only at a high level; one is about no_grad on cache updates, the other about sync_gpus behavior in generation." + }, + { + "left": "issue:42548", + "right": "issue:44351", + "accept": false, + "reason": "Both are import errors from transformers, but they reference different missing symbols and likely different export regressions." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 123, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:30990", + "issue:31795", + "issue:36296", + "issue:43010", + "issue:43065", + "issue:43502", + "issue:44075", + "issue:44222", + "issue:44315", + "issue:44355", + "issue:44360", + "issue:44368", + "issue:44387", + "issue:44485", + "issue:44556", + "issue:44749", + "issue:45125", + "issue:45375" + ], + "soft_pairs": [ + "issue:44387|issue:44556", + "issue:44222|issue:45125", + "issue:43010|issue:43065", + "issue:45125|issue:45375", + "issue:44355|issue:44749", + "issue:30990|issue:31795", + "issue:43502|issue:44485", + "issue:43010|issue:44368", + "issue:44315|issue:44360", + "issue:36296|issue:44075" + ], + "prepared_review_unit_hash": "fd371777569ca9342ade03d7f977a65b220fc07bc2c3e6f442c8eba5e4e33287", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 10, + "serialized_chars": 13276, + "estimated_input_tokens": 3319, + "estimated_eval_tokens": 6894 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is heterogeneous: the soft-linked pairs are mostly unrelated issues sharing only broad thematic similarity. 
I found no true duplicates or mergeable PR-equivalents.", + "confidence": 0.91, + "canonical_issue_reason": "No single canonical issue fits the cluster; the items span distinct bugs, warnings, config problems, and docs topics rather than one underlying defect.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:44387 is the strongest standalone bug report: it describes a concrete regression (higher CUDA reserved memory / OOM under int4 quantization), has clear scope, and attracted substantial discussion.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44387", + "right": "issue:44556", + "accept": false, + "reason": "Both involve version/regression complaints, but one is int4 memory/OOM and the other is checkpoint reload failure; different failure modes and fixes." + }, + { + "left": "issue:44222", + "right": "issue:45125", + "accept": false, + "reason": "FP8 save_pretrained for MoE and missing _tp_plan for tensor parallelism are separate model-handling bugs." + }, + { + "left": "issue:43010", + "right": "issue:43065", + "accept": false, + "reason": "`no_grad` on cache update and a dummy `Conv2d` in Sam3PixelDecoder are unrelated implementation issues." + }, + { + "left": "issue:45125", + "right": "issue:45375", + "accept": false, + "reason": "Both mention Qwen3_5Moe variants, but one is tensor parallelism metadata and the other is a missing config field dropped by strict validation." + }, + { + "left": "issue:44355", + "right": "issue:44749", + "accept": false, + "reason": "Compiled Python file errors and post-upgrade data filtering slowdown are not the same bug." + }, + { + "left": "issue:30990", + "right": "issue:31795", + "accept": false, + "reason": "Model loading hangs versus forward-method documentation confusion are clearly different issues." + }, + { + "left": "issue:43502", + "right": "issue:44485", + "accept": false, + "reason": "Offline file-loading API calls and GLM-5 RoPE implementation are unrelated." 
+ }, + { + "left": "issue:43010", + "right": "issue:44368", + "accept": false, + "reason": "A cache/layer update decorator bug is not the same as a model warning about `tie_word_embeddings`." + }, + { + "left": "issue:44315", + "right": "issue:44360", + "accept": false, + "reason": "Liger Kernel application during `model_init` and a DSA indexer ReLU discussion are different code paths and symptoms." + }, + { + "left": "issue:36296", + "right": "issue:44075", + "accept": false, + "reason": "Tensor-parallel training bug and SGD args not being used are separate optimizer/model-parallel concerns." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 124, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:33453", + "issue:34689", + "issue:36296", + "issue:38175", + "issue:42831", + "issue:42890", + "issue:43232", + "issue:43299", + "issue:43502", + "issue:43874", + "issue:44222", + "issue:44230", + "issue:44355", + "issue:44373", + "issue:44556", + "issue:44655", + "issue:44961", + "issue:45071" + ], + "soft_pairs": [ + "issue:42890|issue:44373", + "issue:38175|issue:43502", + "issue:43502|issue:44961", + "issue:43299|issue:44556", + "issue:44355|issue:44655", + "issue:44355|issue:45071", + "issue:42831|issue:43874", + "issue:42890|issue:43232", + "issue:44222|issue:44230", + "issue:33453|issue:34689", + "issue:33453|issue:36296" + ], + "prepared_review_unit_hash": "06eff93f023b2084752c3558afee6956e6307227a37cecc23d011f66bfdb7227", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + 
"estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 11, + "serialized_chars": 13386, + "estimated_input_tokens": 3347, + "estimated_eval_tokens": 6950 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The soft pairs are broadly related only by high-level themes like loading regressions, FP8, or test/runtime failures, but none appear to describe the same concrete bug or change.", + "confidence": 0.94, + "canonical_issue_reason": "No single canonical issue: the set is heterogeneous, with unrelated topics spanning tokenizer/model loading, FP8 behavior, tests, docs, and generation internals.", + "canonical_pr_reason": null, + "best_issue_reason": "No global best issue stands out as a true cluster representative; 43502 is the broadest behavior report, but it is still unrelated to most of the other issues.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:42890", + "right": "issue:44373", + "accept": false, + "reason": "One is about missing set_seed in a model integration test; the other is a wrong docstring for position_ids. Different problem classes." + }, + { + "left": "issue:38175", + "right": "issue:43502", + "accept": false, + "reason": "Unexpected zero probabilities in a specific model is unrelated to unwanted API requests when local_files_only=True." + }, + { + "left": "issue:43502", + "right": "issue:44961", + "accept": false, + "reason": "Local-files-only network requests and the unrelated 'racoon' issue do not describe the same bug." + }, + { + "left": "issue:43299", + "right": "issue:44556", + "accept": false, + "reason": "Both mention loading regressions, but they affect different versions/models and different failure modes; not the same underlying defect." 
+ }, + { + "left": "issue:44355", + "right": "issue:44655", + "accept": false, + "reason": "Compiled Python file errors are unrelated to saving Pipeline objects with save_pretrained()." + }, + { + "left": "issue:44355", + "right": "issue:45071", + "accept": false, + "reason": "A compiled-file runtime error is not the same as PretrainedConfig type-checking breaking in v5.4.0." + }, + { + "left": "issue:42831", + "right": "issue:43874", + "accept": false, + "reason": "FP8 accuracy issues and a missing image-processor method causing AttributeError are distinct bugs." + }, + { + "left": "issue:42890", + "right": "issue:43232", + "accept": false, + "reason": "Test seeding gaps are unrelated to the generation kwarg update after sync_gpus." + }, + { + "left": "issue:44222", + "right": "issue:44230", + "accept": false, + "reason": "Both touch FP8/MoE, but one is about save_pretrained and the other about inference support; not mergeable as one fix." + }, + { + "left": "issue:33453", + "right": "issue:34689", + "accept": false, + "reason": "Both are loading regressions, but they involve different models/components and different root causes." + }, + { + "left": "issue:33453", + "right": "issue:36296", + "accept": false, + "reason": "Tokenizer loading regression and tensor-parallel training bug are unrelated." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 125, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:31795", + "issue:33357", + "issue:33453", + "issue:38617", + "issue:41628", + "issue:42548", + "issue:43576", + "issue:43673", + "issue:43824", + "issue:43827", + "issue:44351", + "issue:44485", + "issue:44492", + "issue:44908", + "issue:44938", + "issue:44961", + "issue:45071" + ], + "soft_pairs": [ + "issue:43827|issue:44961", + "issue:43673|issue:44492", + "issue:43576|issue:43824", + "issue:43576|issue:45071", + "issue:38617|issue:43576", + "issue:33357|issue:33453", + "issue:41628|issue:42548", + "issue:31795|issue:38617", + "issue:43576|issue:44351", + "issue:44908|issue:44961", + "issue:43576|issue:44938", + "issue:44485|issue:44961" + ], + "prepared_review_unit_hash": "81e8fdce64a6281fbf326d980f549a3d101c7d7bdd2659ff94c9b1f9cf5d006e", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 12, + "serialized_chars": 13091, + "estimated_input_tokens": 3273, + "estimated_eval_tokens": 6802 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These items do not look like one duplicate cluster; they span unrelated documentation, import, tokenizer, scheduler, cache, and runtime/compatibility problems. 
I would not merge any of the soft pairs.", + "confidence": 0.88, + "canonical_issue_reason": "No single true canonical issue stands out because the items are mostly unrelated. If one must be picked as the cluster representative, 43576 is the broadest and most substantive v5 regression report with the most discussion/inbound references.", + "canonical_pr_reason": null, + "best_issue_reason": "43576 is the best representative only in the sense that it is the most prominent, user-facing regression report here; however, it is not actually a duplicate of the others.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43827", + "right": "issue:44961", + "accept": false, + "reason": "Docs referencing removed pipeline APIs and an unrelated issue titled \"racoon\" are clearly not the same bug." + }, + { + "left": "issue:43673", + "right": "issue:44492", + "accept": false, + "reason": "A chunked_prefill cache regression and a cache-strategy typo are different problems and code paths." + }, + { + "left": "issue:43576", + "right": "issue:43824", + "accept": false, + "reason": "Broken transformers env command vs missing Qwen2_5_VL export are unrelated failures." + }, + { + "left": "issue:43576", + "right": "issue:45071", + "accept": false, + "reason": "Different regressions: CLI/env command breakage versus type-checking breakage in PretrainedConfig." + }, + { + "left": "issue:38617", + "right": "issue:43576", + "accept": false, + "reason": "ImportError for layer_type_validation is unrelated to the transformers env command being broken." + }, + { + "left": "issue:33357", + "right": "issue:33453", + "accept": false, + "reason": "MacOS bus error with a community CLIP model and tokenizer loading regression are different underlying issues." + }, + { + "left": "issue:41628", + "right": "issue:42548", + "accept": false, + "reason": "Missing AutoImageProcessor and missing PreTrainedModel are distinct import/export problems." 
+ }, + { + "left": "issue:31795", + "right": "issue:38617", + "accept": false, + "reason": "Documentation confusion in model.forward is not the same as an ImportError from configuration_utils." + }, + { + "left": "issue:43576", + "right": "issue:44351", + "accept": false, + "reason": "Both are import-adjacent, but they concern different symbols and likely different breakpoints." + }, + { + "left": "issue:44908", + "right": "issue:44961", + "accept": false, + "reason": "Scheduler kwargs being ignored is unrelated to the unrelated \"racoon\" issue." + }, + { + "left": "issue:43576", + "right": "issue:44938", + "accept": false, + "reason": "A broken env command and a Python 3.14 load failure are different compatibility/path issues." + }, + { + "left": "issue:44485", + "right": "issue:44961", + "accept": false, + "reason": "GLM-5 RoPE implementation discussion and an unrelated \"racoon\" issue do not match." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 126, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:36296", + "issue:41628", + "issue:41669", + "issue:43012", + "issue:43526", + "issue:43576", + "issue:43673", + "issue:43901", + "issue:44261", + "issue:44273", + "issue:44355", + "issue:44360", + "issue:44556", + "issue:44623", + "issue:44961", + "issue:45003", + "issue:45230" + ], + "soft_pairs": [ + "issue:44273|issue:45003", + "issue:36296|issue:43901", + "issue:41628|issue:44355", + "issue:36296|issue:43012", + "issue:36296|issue:43526", + "issue:44261|issue:44961", + "issue:44261|issue:44360", + "issue:43576|issue:44556", + "issue:44623|issue:45230", + 
"issue:41669|issue:43673" + ], + "prepared_review_unit_hash": "1ebec4095f64e9220d74973fd4476282921d80a910c589fbbcccc869902459d9", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 10, + "serialized_chars": 12471, + "estimated_input_tokens": 3118, + "estimated_eval_tokens": 6492 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is a grab bag of unrelated issue reports rather than a duplicate set. No soft pair looks like the same underlying bug/change, so all should be rejected. If forced to pick an anchor, issue 44261 is the strongest standalone report by activity and specificity.", + "confidence": 0.94, + "canonical_issue_reason": "Issue 44261 is the most substantive open report in the set, with the highest discussion activity and concrete technical detail, so it is the best anchor if one must be chosen.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 44261 is the best global issue candidate because it is open, well-scoped, and has active discussion; the rest are either unrelated, overly generic, or weaker as standalone reports.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44273", + "right": "issue:45003", + "accept": false, + "reason": "Both touch loading/import behavior, but one is a lazy-loading bug and the other is an unsafe sys.modules access issue in modeling_utils; different failure modes and fixes." + }, + { + "left": "issue:36296", + "right": "issue:43901", + "accept": false, + "reason": "Tensor-parallel training bug vs TextClassificationPipeline docs/behavior mismatch; unrelated subsystems and code paths." 
+ }, + { + "left": "issue:41628", + "right": "issue:44355", + "accept": false, + "reason": "AutoImageProcessor import failure and errors running compiled Python files are different symptoms with no clear shared root cause." + }, + { + "left": "issue:36296", + "right": "issue:43012", + "accept": false, + "reason": "A tensor-parallel training bug is not the same as a bfloat16 float32 precision warning; too generic to merge." + }, + { + "left": "issue:36296", + "right": "issue:43526", + "accept": false, + "reason": "Training parallelism issue vs BeitImageProcessorFast reduce_labels bug; separate components and fixes." + }, + { + "left": "issue:44261", + "right": "issue:44961", + "accept": false, + "reason": "MLA q_a_layernorm RMS epsilon precision issue is unrelated to the vague 'racoon' report." + }, + { + "left": "issue:44261", + "right": "issue:44360", + "accept": false, + "reason": "Missing config.rms_norm_eps in MLA q_a_layernorm and a missing ReLU in DSA indexer are different concrete bugs." + }, + { + "left": "issue:43576", + "right": "issue:44556", + "accept": false, + "reason": "Both mention v5 regressions, but one is a broken CLI env command and the other is checkpoint reload incompatibility; not the same bug." + }, + { + "left": "issue:44623", + "right": "issue:45230", + "accept": false, + "reason": "Processor.save_pretrained missing files is specific; 'Bug report' is too generic and provides no evidence of the same underlying problem." + }, + { + "left": "issue:41669", + "right": "issue:43673", + "accept": false, + "reason": "Import-star cleanup/performance issue and GenerationMixin cache missing in chunked_prefill are distinct problems despite both involving model internals." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 127, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:42371", + "issue:42890", + "issue:43352", + "issue:43408", + "issue:43576", + "issue:43976", + "issue:44038", + "issue:44246", + "issue:44273", + "issue:44315", + "issue:44393", + "issue:44492", + "issue:44556", + "issue:44704", + "issue:44908", + "issue:45071", + "issue:45092", + "issue:45125" + ], + "soft_pairs": [ + "issue:44393|issue:45071", + "issue:44038|issue:45125", + "issue:44556|issue:45092", + "issue:42371|issue:44492", + "issue:42890|issue:44908", + "issue:44246|issue:44273", + "issue:44704|issue:44908", + "issue:43408|issue:44315", + "issue:43576|issue:44908", + "issue:43976|issue:44393", + "issue:43352|issue:44393", + "issue:44908|issue:45071" + ], + "prepared_review_unit_hash": "271e53bd5bd802ca8e10a3e9680fad25e58304d7392c7bffd7fef9a50cb0886d", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 12, + "serialized_chars": 13809, + "estimated_input_tokens": 3453, + "estimated_eval_tokens": 7162 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These items are largely heterogeneous and only loosely linked by Transformers v5 / model-compatibility wording. The soft pairs are title-similar but describe different bugs, so none should be merged. 
The broadest representative issue is the v5 PretrainedConfig regression, but overall cluster coherence is weak.", + "confidence": 0.31, + "canonical_issue_reason": "Issue 45071 is the broadest core-regression report in the set: a v5 type-checking break in a central API, which makes it the least narrow representative among mostly model- or feature-specific bugs.", + "canonical_pr_reason": null, + "best_issue_reason": "45071 is the best single issue to stand in for this cluster because it is a core v5 compatibility failure rather than a narrow model-specific symptom.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44393", + "right": "issue:45071", + "accept": false, + "reason": "Different bugs: Qwen3-VL 2D bounding-box hallucination/error vs PretrainedConfig type checking regression." + }, + { + "left": "issue:44038", + "right": "issue:45125", + "accept": false, + "reason": "Both involve Qwen3 MOE models, but one is a Qwen3-VL-Moe bug and the other is missing tensor-parallel plan support; different code paths and fixes." + }, + { + "left": "issue:44556", + "right": "issue:45092", + "accept": false, + "reason": "Both are v5 upgrade compatibility reports, but one is checkpoint reload failure and the other is remote-code/meta-init incompatibility; not the same underlying bug." + }, + { + "left": "issue:42371", + "right": "issue:44492", + "accept": false, + "reason": "TF32 API control vs a typo in cache strategy text; unrelated issues." + }, + { + "left": "issue:42890", + "right": "issue:44908", + "accept": false, + "reason": "Flaky integration tests from missing set_seed calls are unrelated to inverse_sqrt scheduler kwargs propagation." + }, + { + "left": "issue:44246", + "right": "issue:44273", + "accept": false, + "reason": "Import slowness and lazy-loading failure are adjacent areas, but they are not the same concrete bug." 
+ }, + { + "left": "issue:44704", + "right": "issue:44908", + "accept": false, + "reason": "AutoProcessor kwargs forwarding to cached_file and scheduler lr kwargs handling are distinct subsystems and fixes." + }, + { + "left": "issue:43408", + "right": "issue:44315", + "accept": false, + "reason": "Model-type warning for sam3_video/tracker vs Liger Kernel not applied with model_init; unrelated code paths." + }, + { + "left": "issue:43576", + "right": "issue:44908", + "accept": false, + "reason": "Broken `transformers env` command is unrelated to scheduler kwarg handling." + }, + { + "left": "issue:43976", + "right": "issue:44393", + "accept": false, + "reason": "Python version compatibility failure vs Qwen3-VL bounding-box output bug; different problems." + }, + { + "left": "issue:43352", + "right": "issue:44393", + "accept": false, + "reason": "Flash Attention 2.0 unsupported for NemotronH vs Qwen3-VL bbox hallucination/error; unrelated." + }, + { + "left": "issue:44908", + "right": "issue:45071", + "accept": false, + "reason": "Scheduler kwargs are ignored in one issue, while the other is a PretrainedConfig type-checking regression; same release era but not same bug." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 128, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:31795", + "issue:33453", + "issue:41669", + "issue:43704", + "issue:43827", + "issue:44077", + "issue:44295", + "issue:44297", + "issue:44315", + "issue:44351", + "issue:44492", + "issue:44829", + "issue:44861", + "issue:44908", + "issue:44945", + "issue:45230", + "issue:45341" + ], + "soft_pairs": [ + "issue:41669|issue:44351", + "issue:44315|issue:44829", + "issue:44297|issue:45230", + "issue:41669|issue:45341", + "issue:44295|issue:45230", + "issue:44492|issue:44945", + "issue:41669|issue:44492", + "issue:43827|issue:44908", + "issue:44077|issue:44861", + "issue:41669|issue:43704", + "issue:31795|issue:33453" + ], + "prepared_review_unit_hash": "3df1b5da951a2bd10a7ed9dec3c45df552e9794e35bc5e05864e12ba6d1401e9", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 11, + "serialized_chars": 12957, + "estimated_input_tokens": 3240, + "estimated_eval_tokens": 6736 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This is a noisy, false-positive cluster: the issues span unrelated areas (docs, tokenizer loading, cache internals, schedulers, Liger/flash-attn, VRAM leaks, and pipeline parallelism) and do not share a concrete underlying bug or change.", + 
"confidence": 0.97, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:41669", + "right": "issue:44351", + "accept": false, + "reason": "Both mention imports, but one is about removing import * for performance and file explosion, while the other is a missing HybridCache export/import error; different code paths and fixes." + }, + { + "left": "issue:44315", + "right": "issue:44829", + "accept": false, + "reason": "Liger Kernel not applied with model_init and flash_attention_3 causing degenerate training are distinct model-training bugs with different triggers and remedies." + }, + { + "left": "issue:44297", + "right": "issue:45230", + "accept": false, + "reason": "tokenizer.save_pretrained writing the wrong tokenizer_class is a specific serialization bug; 'Bug report' is too vague and does not identify the same issue." + }, + { + "left": "issue:41669", + "right": "issue:45341", + "accept": false, + "reason": "import * usage causing slowdown/file explosion is unrelated to a small testing_utils bug." + }, + { + "left": "issue:44295", + "right": "issue:45230", + "accept": false, + "reason": "position_ids buffer read errors are a concrete tensor/buffer handling bug, not the same as an unspecified generic bug report." + }, + { + "left": "issue:44492", + "right": "issue:44945", + "accept": false, + "reason": "A typo in cache strategies is unrelated to incorrect LLM outputs under pipeline parallelism; these are different subsystems and failure modes." + }, + { + "left": "issue:41669", + "right": "issue:44492", + "accept": false, + "reason": "Import-star performance regression and a cache-strategy typo are not the same underlying defect." 
+ }, + { + "left": "issue:43827", + "right": "issue:44908", + "accept": false, + "reason": "Docs still referencing pipeline() after removals is a documentation cleanup issue; inverse_sqrt ignoring lr_scheduler_kwargs is a runtime scheduler bug." + }, + { + "left": "issue:44077", + "right": "issue:44861", + "accept": false, + "reason": "patchtsmixer post_init allowance and _get_tied_weight_keys crashing on list input are unrelated model/config bugs." + }, + { + "left": "issue:41669", + "right": "issue:43704", + "accept": false, + "reason": "Import * slowdown is unrelated to Qwen3ForCausalLM VRAM leakage in multi-dataloader threads." + }, + { + "left": "issue:31795", + "right": "issue:33453", + "accept": false, + "reason": "Confusing forward-argument docs and a tokenizer loading regression do not point to the same bug or fix." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 129, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:31795", + "issue:34689", + "issue:37428", + "issue:43519", + "issue:43576", + "issue:43723", + "issue:43937", + "issue:44273", + "issue:44315", + "issue:44393", + "issue:44485", + "issue:44655", + "issue:44861", + "issue:44908", + "issue:45071", + "issue:45125", + "issue:45230", + "issue:45310" + ], + "soft_pairs": [ + "issue:43723|issue:44393", + "issue:44655|issue:44861", + "issue:43576|issue:44273", + "issue:43937|issue:44393", + "issue:45125|issue:45310", + "issue:44273|issue:44908", + "issue:45230|issue:45310", + "issue:43519|issue:44485", + "issue:43519|issue:44908", + "issue:31795|issue:34689", + "issue:44315|issue:44393", + 
"issue:31795|issue:37428", + "issue:44273|issue:45071" + ], + "prepared_review_unit_hash": "dcfd67491e091dcb39740d05d09c3731e49efb3c06f100326c7fbe4fe10a5e7c", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 13, + "serialized_chars": 14019, + "estimated_input_tokens": 3505, + "estimated_eval_tokens": 7266 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "The items are mostly unrelated bugs and docs issues. The only likely duplicate pair is the Qwen3.5 MoE loading/from_pretrained failure tied to missing `_tp_plan`; the rest do not look like the same underlying problem.", + "confidence": 0.66, + "canonical_issue_reason": "issue:45125 is the most concrete technical report in the only plausible duplicate pair: it states the likely root cause (`missing _tp_plan` for tensor parallelism) rather than the broader user-facing load error.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:45125 is the best representative issue for the cluster\u2019s only clear subtopic because it names the concrete code-path problem and is easier to deduplicate against than the generic symptom report.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43723", + "right": "issue:44393", + "accept": false, + "reason": "Tokenizer loading in v5 and Qwen3-VL 2D bbox output are different subsystems and failure modes." + }, + { + "left": "issue:44655", + "right": "issue:44861", + "accept": false, + "reason": "Pipeline save_pretrained support and tied-weight-key AttributeError are unrelated code paths." 
+ }, + { + "left": "issue:43576", + "right": "issue:44273", + "accept": false, + "reason": "Broken `transformers env` CLI and lazy-loading behavior are not the same bug." + }, + { + "left": "issue:43937", + "right": "issue:44393", + "accept": false, + "reason": "GLM-5 generation config validation and Qwen3-VL bbox output errors are distinct model-specific issues." + }, + { + "left": "issue:45125", + "right": "issue:45310", + "accept": true, + "reason": "Both describe the same Qwen3.5 MoE loading/from_pretrained failure, with 45125 giving the concrete root cause (`_tp_plan` missing)." + }, + { + "left": "issue:44273", + "right": "issue:44908", + "accept": false, + "reason": "Lazy loading and inverse_sqrt scheduler kwargs handling are unrelated." + }, + { + "left": "issue:45230", + "right": "issue:45310", + "accept": false, + "reason": "A generic bug report is too vague to be confidently the same underlying issue as the Qwen3.5 MoE load error." + }, + { + "left": "issue:43519", + "right": "issue:44485", + "accept": false, + "reason": "Qwen3VL timestamp calculation and GLM-5 RoPE implementation are different model behaviors." + }, + { + "left": "issue:43519", + "right": "issue:44908", + "accept": false, + "reason": "Timestamp calculation in a processor is unrelated to scheduler kwargs handling." + }, + { + "left": "issue:31795", + "right": "issue:34689", + "accept": false, + "reason": "Model.forward docs confusion and Llama 3.2 Vision model loading breakage are not the same bug." + }, + { + "left": "issue:44315", + "right": "issue:44393", + "accept": false, + "reason": "Liger Kernel application with `model_init` and Qwen3-VL bbox output are different problems." + }, + { + "left": "issue:31795", + "right": "issue:37428", + "accept": false, + "reason": "Documentation clarity in `model.forward` does not match an import failure in flash-attention utilities." 
+ }, + { + "left": "issue:44273", + "right": "issue:45071", + "accept": false, + "reason": "Lazy loading behavior and `PretrainedConfig` type checking are unrelated regressions." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 130, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:42371", + "issue:42890", + "issue:42913", + "issue:43066", + "issue:43502", + "issue:43519", + "issue:43525", + "issue:43618", + "issue:43704", + "issue:44485", + "issue:44492", + "issue:44556", + "issue:44623", + "issue:44861", + "issue:44908", + "issue:44998", + "issue:45290", + "issue:45341" + ], + "soft_pairs": [ + "issue:43704|issue:44485", + "issue:42371|issue:43704", + "issue:43502|issue:43519", + "issue:43519|issue:44623", + "issue:43519|issue:43704", + "issue:42913|issue:43066", + "issue:42890|issue:43704", + "issue:44908|issue:44998", + "issue:44861|issue:45290", + "issue:44998|issue:45341", + "issue:44492|issue:44998", + "issue:43618|issue:44998", + "issue:44556|issue:44998", + "issue:43525|issue:44861" + ], + "prepared_review_unit_hash": "afc99282bc677266a7f7187ca5442c4cab3c50dd251f5437721d4e029bffc031", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 18, + "item_count": 18, + "soft_pair_count": 14, + "serialized_chars": 14317, + "estimated_input_tokens": 3580, + "estimated_eval_tokens": 7416 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + 
"analyst_result": { + "summary": "This cluster is a grab-bag of unrelated bug reports. The soft-similarity pairs mostly share broad vocabulary or subsystems, but they do not describe the same concrete defect, so none should be merged as duplicates.", + "confidence": 0.96, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43704", + "right": "issue:44485", + "accept": false, + "reason": "VRAM leakage in multi-threaded dataloading is unrelated to a GLM-5 RoPE implementation discussion." + }, + { + "left": "issue:42371", + "right": "issue:43704", + "accept": false, + "reason": "TF32 configuration guidance and a VRAM leak in Qwen3ForCausalLM are different bug categories with no shared code path." + }, + { + "left": "issue:43502", + "right": "issue:43519", + "accept": false, + "reason": "Unexpected network calls with local_files_only and a Qwen3VL timestamp bug are separate processor/model issues." + }, + { + "left": "issue:43519", + "right": "issue:44623", + "accept": false, + "reason": "Wrong timestamp calculation and missing processor save_pretrained files affect different processor behaviors." + }, + { + "left": "issue:43519", + "right": "issue:43704", + "accept": false, + "reason": "A timestamp computation bug in Qwen3VL is not the same as a VRAM leak in Qwen3ForCausalLM." + }, + { + "left": "issue:42913", + "right": "issue:43066", + "accept": false, + "reason": "Both concern tokenizer v5, but one reports a general v4-to-v5 behavior change while the other is specifically a wrong decoder type; not the same concrete bug." + }, + { + "left": "issue:42890", + "right": "issue:43704", + "accept": false, + "reason": "Test flakiness from missing set_seed calls is unrelated to a runtime VRAM leak." 
+ }, + { + "left": "issue:44908", + "right": "issue:44998", + "accept": false, + "reason": "Scheduler kwargs handling and an unrelated placeholder/junk issue do not match the same defect." + }, + { + "left": "issue:44861", + "right": "issue:45290", + "accept": false, + "reason": "A tied-weights key AttributeError and a chat-template crash with tool-call messages are different code paths." + }, + { + "left": "issue:44998", + "right": "issue:45341", + "accept": false, + "reason": "The placeholder 'Unemployment' issue is not the same as a testing_utils bug report." + }, + { + "left": "issue:44492", + "right": "issue:44998", + "accept": false, + "reason": "A cache-strategy typo and an unrelated placeholder issue are not duplicates." + }, + { + "left": "issue:43618", + "right": "issue:44998", + "accept": false, + "reason": "CLIPOutput attentions assignment regression is unrelated to the placeholder 'Unemployment' issue." + }, + { + "left": "issue:44556", + "right": "issue:44998", + "accept": false, + "reason": "Checkpoint reload incompatibility across versions is unrelated to the placeholder issue." + }, + { + "left": "issue:43525", + "right": "issue:44861", + "accept": false, + "reason": "Missing pad_token_id on Llama4Config and tied-weight key handling are distinct attribute errors in different components." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 2, + "component_count": 12, + "review_unit_index": 131, + "review_unit_count": 131, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-15354-349", + "nodes": [ + "issue:31515", + "issue:31795", + "issue:33453", + "issue:36296", + "issue:36683", + "issue:38617", + "issue:43298", + "issue:43572", + "issue:44230", + "issue:44623", + "issue:44861", + "issue:45310" + ], + "soft_pairs": [ + "issue:43572|issue:44861", + "issue:31515|issue:38617", + "issue:44230|issue:44623", + "issue:31515|issue:36296", + "issue:31515|issue:31795", + "issue:31515|issue:33453", + "issue:44230|issue:45310", + "issue:31515|issue:36683", + "issue:43298|issue:44230" + ], + "prepared_review_unit_hash": "6f6be992d469630a392669626992c4aec443d610612b9d27d2cf9c808db679e9", + "component_budget": { + "node_count": 349, + "item_count": 349, + "soft_pair_count": 1303, + "serialized_chars": 672381, + "estimated_input_tokens": 168096, + "estimated_eval_tokens": 336448 + }, + "budget": { + "node_count": 12, + "item_count": 12, + "soft_pair_count": 9, + "serialized_chars": 9457, + "estimated_input_tokens": 2365, + "estimated_eval_tokens": 4986 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These issues are mostly only loosely related by broad model-loading/configuration terminology. 
None of the soft pairs looks like the same underlying bug or change, so I would not merge any of them as duplicates.", + "confidence": 0.71, + "canonical_issue_reason": "issue:45310 is the most concrete and actionable bug report in the set: it names a specific from_pretrained failure, a version boundary (transformers>=5.4.0), and a concrete model family (Qwen3.5 MoE).", + "canonical_pr_reason": null, + "best_issue_reason": "If one issue must represent this set, issue:45310 is the best anchor because it is specific, reproducible, and clearly about a real runtime failure rather than docs or a generic feature request.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43572", + "right": "issue:44861", + "accept": false, + "reason": "Both are config/loading-related, but one is a missing StableLmConfig field after a version update and the other is an AttributeError in tied-weight key handling. Different code paths and different failure modes." + }, + { + "left": "issue:31515", + "right": "issue:38617", + "accept": false, + "reason": "One reports slow checkpoint loading in from_pretrained; the other is an ImportError for layer_type_validation. Not the same bug." + }, + { + "left": "issue:44230", + "right": "issue:44623", + "accept": false, + "reason": "FP8 inference support for Qwen models is a feature/support request, while processor.save_pretrained missing files is a packaging/save bug. Different underlying changes." + }, + { + "left": "issue:31515", + "right": "issue:36296", + "accept": false, + "reason": "Checkpoint loading performance and tensor-parallel training are unrelated problems." + }, + { + "left": "issue:31515", + "right": "issue:31795", + "accept": false, + "reason": "Slow checkpoint loading is not the same as confusing forward() documentation for input_ids/past_key_values." 
+ }, + { + "left": "issue:31515", + "right": "issue:33453", + "accept": false, + "reason": "Both mention loading, but one is a performance complaint and the other is a tokenizer-loading regression. Too broad to treat as the same concrete bug." + }, + { + "left": "issue:44230", + "right": "issue:45310", + "accept": false, + "reason": "Both mention Qwen/MoE loading, but one asks for FP8 support and the other reports a from_pretrained error on a specific version. Not the same fixable code-path problem." + }, + { + "left": "issue:31515", + "right": "issue:36683", + "accept": false, + "reason": "Checkpoint loading slowness and Gemma3Config missing vocab_size are distinct issues." + }, + { + "left": "issue:43298", + "right": "issue:44230", + "accept": false, + "reason": "Continuous batching not supporting audio models and Qwen3.5 FP8 inference support are different feature gaps in different subsystems." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 3, + "component_count": 12, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-27088-4", + "nodes": [ + "issue:27088", + "issue:43834", + "issue:44365", + "issue:44370" + ], + "soft_pairs": [ + "issue:27088|issue:44370", + "issue:27088|issue:44365", + "issue:27088|issue:43834", + "issue:43834|issue:44370", + "issue:43834|issue:44365", + "issue:44365|issue:44370" + ], + "prepared_review_unit_hash": "70820322795c9013a91e4a95c24e07c6a005727afa435f8808d3d62379217a2a", + "component_budget": { + "node_count": 4, + "item_count": 4, + "soft_pair_count": 6, + "serialized_chars": 4454, + "estimated_input_tokens": 1114, + "estimated_eval_tokens": 2484 + }, + "budget": { + "node_count": 4, + "item_count": 4, + 
"soft_pair_count": 6, + "serialized_chars": 4454, + "estimated_input_tokens": 1114, + "estimated_eval_tokens": 2484 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "This cluster is a set of highly similar documentation-translation issues, mostly templated duplicates of the same i18n request pattern. The Turkish-specific issue is the most concrete representative, while the others look like near-identical template instances.", + "confidence": 0.78, + "canonical_issue_reason": "issue #27088 is the only one with a concrete language in the title (Turkish), so it best anchors the cluster as a real docs-translation request rather than a placeholder template.", + "canonical_pr_reason": null, + "best_issue_reason": "#27088 is the clearest and most specific issue to keep as the representative duplicate target for this cluster.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:27088", + "right": "issue:44370", + "accept": true, + "reason": "Same underlying docs-translation issue pattern; body similarity is high and the title difference appears to be templated language labeling rather than a distinct bug." + }, + { + "left": "issue:27088", + "right": "issue:44365", + "accept": true, + "reason": "Near-identical translation request with only template-level language placeholders differing; no evidence of a distinct underlying issue." + }, + { + "left": "issue:27088", + "right": "issue:43834", + "accept": true, + "reason": "Same i18n docs-translation task family; highly similar body text suggests a duplicate or template variant rather than a separate issue." + }, + { + "left": "issue:43834", + "right": "issue:44370", + "accept": true, + "reason": "These two look like the same templated docs-translation issue with only minor placeholder/body variation." 
+ }, + { + "left": "issue:43834", + "right": "issue:44365", + "accept": true, + "reason": "Essentially the same templated translation request; body overlap is near-total and no distinct target is visible." + }, + { + "left": "issue:44365", + "right": "issue:44370", + "accept": true, + "reason": "Near-duplicate template issues with almost identical bodies and no distinguishing issue target or code-path difference." + } + ] + }, + "evaluator_result": { + "accept": true, + "feedback": "The summary is grounded in the packet: one issue has a concrete language in the title, while the others are clear template placeholders, and the pairwise Jaccard scores are very high across the cluster. The duplicate/near-duplicate judgments are conservative enough for this evidence." + }, + "evaluator_used": true, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 4, + "component_count": 12, + "review_unit_index": 1, + "review_unit_count": 2, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-36817-13", + "nodes": [ + "issue:36817", + "issue:37015", + "issue:41211", + "issue:42503", + "issue:42738", + "issue:43671", + "issue:43837", + "issue:43909", + "issue:44121", + "issue:44205", + "issue:45027", + "issue:45177", + "issue:45322" + ], + "soft_pairs": [ + "issue:43909|issue:44205", + "issue:43671|issue:43837", + "issue:44121|issue:45322", + "issue:43837|issue:43909", + "issue:43837|issue:45322", + "issue:37015|issue:43909", + "issue:43671|issue:45177", + "issue:37015|issue:43837", + "issue:43671|issue:45027", + "issue:37015|issue:44121", + "issue:43837|issue:44121", + "issue:43837|issue:45177", + "issue:43671|issue:43909", + "issue:43671|issue:44121", + "issue:41211|issue:43671", + "issue:36817|issue:41211", + "issue:37015|issue:42503", + "issue:36817|issue:43671", + 
"issue:37015|issue:45027", + "issue:42738|issue:45027", + "issue:43837|issue:45027", + "issue:42503|issue:43837", + "issue:42503|issue:42738", + "issue:42503|issue:43671" + ], + "prepared_review_unit_hash": "e2dd0be849b37e749324e16c54f2a8c06618f37b733551ce3e83395c64fa38f9", + "component_budget": { + "node_count": 13, + "item_count": 13, + "soft_pair_count": 25, + "serialized_chars": 16006, + "estimated_input_tokens": 4002, + "estimated_eval_tokens": 8260 + }, + "budget": { + "node_count": 13, + "item_count": 13, + "soft_pair_count": 24, + "serialized_chars": 14120, + "estimated_input_tokens": 3530, + "estimated_eval_tokens": 7316 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is a set of distinct model-request issues, not duplicates: different model families, different modalities (text, audio, ASR/TTS, OCR, sparse transformer, perception encoder), and no shared concrete code change.", + "confidence": 0.98, + "canonical_issue_reason": "No single canonical issue fits: these requests target different models and capabilities, so they are not the same underlying bug/change.", + "canonical_pr_reason": null, + "best_issue_reason": "No issue in this set is a safe duplicate canonical; they are only loosely related by being model-addition requests.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43909", + "right": "issue:44205", + "accept": false, + "reason": "Both are model-add requests, but LFM2.5 Audio 1.5B and SAM3-LiteText are different model families and tasks." + }, + { + "left": "issue:43671", + "right": "issue:43837", + "accept": false, + "reason": "Qwen3-TTS and Qwen3-ASR are related family names, but they are different capabilities and not the same change." 
+ }, + { + "left": "issue:44121", + "right": "issue:45322", + "accept": false, + "reason": "Weight-sparse transformer support and EUPE are unrelated model architectures/requests." + }, + { + "left": "issue:43837", + "right": "issue:43909", + "accept": false, + "reason": "ASR support versus audio model addition are different feature requests, not one concrete duplicate." + }, + { + "left": "issue:43837", + "right": "issue:45322", + "accept": false, + "reason": "Qwen3-ASR and EUPE target different model types and implementation paths." + }, + { + "left": "issue:37015", + "right": "issue:43909", + "accept": false, + "reason": "NeoBERT and LFM2.5 Audio 1.5B are unrelated model additions." + }, + { + "left": "issue:43671", + "right": "issue:45177", + "accept": false, + "reason": "Qwen3-TTS support and DeepSeek-OCR2 are different modalities and code paths." + }, + { + "left": "issue:37015", + "right": "issue:43837", + "accept": false, + "reason": "NeoBERT and Qwen3-ASR are unrelated model requests." + }, + { + "left": "issue:43671", + "right": "issue:45027", + "accept": false, + "reason": "Qwen3-TTS and Voxtral-4B-TTS-2603 are both TTS-related, but they are different models and not the same underlying request." + }, + { + "left": "issue:37015", + "right": "issue:44121", + "accept": false, + "reason": "NeoBERT and OpenAI weight-sparse transformer support are different model families and implementation targets." + }, + { + "left": "issue:43837", + "right": "issue:44121", + "accept": false, + "reason": "ASR support and sparse-transformer support are different features with different code paths." + }, + { + "left": "issue:43837", + "right": "issue:45177", + "accept": false, + "reason": "Qwen3-ASR and DeepSeek-OCR2 are different modalities and model requests." + }, + { + "left": "issue:43671", + "right": "issue:43909", + "accept": false, + "reason": "TTS support for Qwen3 and LFM2.5 Audio are related only broadly by audio, but not the same model/change." 
+ }, + { + "left": "issue:43671", + "right": "issue:44121", + "accept": false, + "reason": "Qwen3-TTS and circuit-sparsity transformer support are unrelated changes." + }, + { + "left": "issue:41211", + "right": "issue:43671", + "accept": false, + "reason": "DEIMv2 and Qwen3-TTS are different model additions with different implementation needs." + }, + { + "left": "issue:36817", + "right": "issue:41211", + "accept": false, + "reason": "EuroBert config support and DEIMv2 are unrelated model requests." + }, + { + "left": "issue:37015", + "right": "issue:42503", + "accept": false, + "reason": "NeoBERT and ModernVBERT are distinct model requests; similar naming does not make them duplicates." + }, + { + "left": "issue:36817", + "right": "issue:43671", + "accept": false, + "reason": "EuroBert config support and Qwen3-TTS are unrelated model changes." + }, + { + "left": "issue:37015", + "right": "issue:45027", + "accept": false, + "reason": "NeoBERT and Voxtral-4B-TTS-2603 are different models and tasks." + }, + { + "left": "issue:42738", + "right": "issue:45027", + "accept": false, + "reason": "BERT-like models with RoPE and Voxtral TTS are not the same concrete request." + }, + { + "left": "issue:43837", + "right": "issue:45027", + "accept": false, + "reason": "Qwen3-ASR and Voxtral TTS are different model support requests." + }, + { + "left": "issue:42503", + "right": "issue:43837", + "accept": false, + "reason": "ModernVBERT models and Qwen3-ASR are different feature requests." + }, + { + "left": "issue:42503", + "right": "issue:42738", + "accept": false, + "reason": "ModernVBERT models and BERT-like models with RoPE overlap only thematically, not as duplicates." + }, + { + "left": "issue:42503", + "right": "issue:43671", + "accept": false, + "reason": "ModernVBERT models and Qwen3-TTS are unrelated model additions." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 4, + "component_count": 12, + "review_unit_index": 2, + "review_unit_count": 2, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-36817-13", + "nodes": [ + "issue:42503", + "issue:45027" + ], + "soft_pairs": [ + "issue:42503|issue:45027" + ], + "prepared_review_unit_hash": "0907a1b34a1ba66651e2dae7e946095d177a986876b3f96e5f0ce3191e950f26", + "component_budget": { + "node_count": 13, + "item_count": 13, + "soft_pair_count": 25, + "serialized_chars": 16006, + "estimated_input_tokens": 4002, + "estimated_eval_tokens": 8260 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 1434, + "estimated_input_tokens": 359, + "estimated_eval_tokens": 974 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These are two unrelated model-support requests: one for ModernVBERT and one for Voxtral-4B-TTS-2603. 
They do not describe the same underlying bug or change, so they should not be clustered as duplicates.", + "confidence": 0.96, + "canonical_issue_reason": "issue #45027 is the more active and current request, with ongoing discussion and inbound references; it is the better representative for this cluster if one must be chosen.", + "canonical_pr_reason": null, + "best_issue_reason": "issue #45027 is the best issue candidate because it is open, specific, and currently engaged; #42503 is a separate older request for a different model family.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:42503", + "right": "issue:45027", + "accept": false, + "reason": "Different model support requests for distinct models (ModernVBERT vs Voxtral-4B-TTS-2603); no shared underlying bug or code change." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 5, + "component_count": 12, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-37934-6", + "nodes": [ + "issue:37934", + "issue:42936", + "issue:43240", + "issue:43668", + "issue:43813", + "issue:44031" + ], + "soft_pairs": [ + "issue:37934|issue:43668", + "issue:42936|issue:43240", + "issue:42936|issue:44031", + "issue:42936|issue:43813", + "issue:42936|issue:43668", + "issue:43240|issue:43668", + "issue:43668|issue:44031", + "issue:43668|issue:43813" + ], + "prepared_review_unit_hash": "f9f0e8ed02afd5eb0d4b7d7d1d88c6b90442e4d09f423a4ad602b3e0655ea657", + "component_budget": { + "node_count": 6, + "item_count": 6, + "soft_pair_count": 8, + "serialized_chars": 6291, + "estimated_input_tokens": 1573, + "estimated_eval_tokens": 3402 + }, + "budget": { + "node_count": 6, + "item_count": 6, + 
"soft_pair_count": 8, + "serialized_chars": 6291, + "estimated_input_tokens": 1573, + "estimated_eval_tokens": 3402 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "These are six distinct issue reports touching different models/subsystems and bug types (norm math, unused config, kwargs plumbing, type hints, typo, tokenizer regex warning). None looks like a true duplicate of another.", + "confidence": 0.9, + "canonical_issue_reason": "#44031 is the strongest representative issue in this set: it describes a broad, user-visible regression with a clear version boundary and affects many tokenizer code paths.", + "canonical_pr_reason": null, + "best_issue_reason": "#44031 is the best standalone issue to prioritize because it has the widest apparent impact and clearest regression framing.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:37934", + "right": "issue:43668", + "accept": false, + "reason": "Different models and different failure modes: a possible Llama4 normalization design question vs. a ModernBERT config type-hint mistake." + }, + { + "left": "issue:42936", + "right": "issue:43240", + "accept": false, + "reason": "Mask2Former ignore_value handling is a model-specific parameter bug, while #43240 is a generic loss kwargs plumbing issue." + }, + { + "left": "issue:42936", + "right": "issue:44031", + "accept": false, + "reason": "Unrelated subsystems and symptoms: Mask2Former training config bug vs. tokenizer regex warning regression." + }, + { + "left": "issue:42936", + "right": "issue:43813", + "accept": false, + "reason": "Mask2Former ignore_value behavior and a quantization attribute typo are different bugs with no shared code-path indicated." + }, + { + "left": "issue:42936", + "right": "issue:43668", + "accept": false, + "reason": "Different models and problem classes: unused ignore_value vs. 
incorrect type annotation for norm_eps." + }, + { + "left": "issue:43240", + "right": "issue:43668", + "accept": false, + "reason": "Generic loss kwargs propagation bug is not the same as a config type-hint error; no overlapping concrete code path is shown." + }, + { + "left": "issue:43668", + "right": "issue:44031", + "accept": false, + "reason": "ModernBERTConfig type hint issue and tokenizer regex warning regression are unrelated changes in different parts of the library." + }, + { + "left": "issue:43668", + "right": "issue:43813", + "accept": false, + "reason": "A type-hint problem in ModernBERTConfig is not the same as a typo in a quantization attribute name." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 6, + "component_count": 12, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-41084-2", + "nodes": [ + "issue:41084", + "issue:43037" + ], + "soft_pairs": [ + "issue:41084|issue:43037" + ], + "prepared_review_unit_hash": "490dd9e57cc2a71b4f003f28c61302b00a02e57d650acd918673cdaeac49c13b", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 1479, + "estimated_input_tokens": 370, + "estimated_eval_tokens": 996 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 1479, + "estimated_input_tokens": 370, + "estimated_eval_tokens": 996 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "The two issues are about different topics: one is a vague request for block decoding, the other is a specific feature request for DeepSeek v3.2 
support. They do not look like duplicates or the same underlying change.", + "confidence": 0.97, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": "If one issue must be picked as the better representative, #43037 is more concrete and actionable, but it is not a duplicate of #41084.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:41084", + "right": "issue:43037", + "accept": false, + "reason": "Different underlying requests with no shared code-path evidence: block decoding behavior vs model/version support for DeepSeek v3.2." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 7, + "component_count": 12, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43598-2", + "nodes": [ + "issue:43598", + "issue:43600" + ], + "soft_pairs": [ + "issue:43598|issue:43600" + ], + "prepared_review_unit_hash": "a3558157ccd8cdd6bbf12299062678a5b42a68cf325739a082623274f863d7aa", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 1582, + "estimated_input_tokens": 396, + "estimated_eval_tokens": 1048 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 1582, + "estimated_input_tokens": 396, + "estimated_eval_tokens": 1048 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "These are two distinct closed issues that only share a loose Trainer/custom-trainer theme: one is about `remove_unused_column` customizability, the other about FLOPS estimation for custom trainers. 
They do not describe the same underlying bug or change.", + "confidence": 0.96, + "canonical_issue_reason": "No single canonical issue: the titles and described problems are different enough that they should not be merged as duplicates.", + "canonical_pr_reason": null, + "best_issue_reason": "If one must be chosen as the closest cluster representative, issue 43598 is slightly more specific to Trainer behavior, but neither issue is a strong duplicate representative for the other.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43598", + "right": "issue:43600", + "accept": false, + "reason": "Reject: `remove_unused_column` customizability and FLOPS estimation for custom trainers are separate Trainer concerns with different symptoms, code paths, and likely fixes." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 8, + "component_count": 12, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43684-2", + "nodes": [ + "issue:43684", + "issue:44405" + ], + "soft_pairs": [ + "issue:43684|issue:44405" + ], + "prepared_review_unit_hash": "69d4838ba3676230573b0aae2e6e35d4d0b02cd31fbfe95d3fc364f429b26c3a", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 1611, + "estimated_input_tokens": 403, + "estimated_eval_tokens": 1062 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 1611, + "estimated_input_tokens": 403, + "estimated_eval_tokens": 1062 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "These are not 
duplicates: one requests Qwen3-Omni registration for AutoModel/AutoModelForConditionalGeneration, the other requests AutoModelForSequenceClassification support for Qwen3.5. Different model families and different heads.", + "confidence": 0.97, + "canonical_issue_reason": "No single canonical issue; the requests target different model families and different auto-model classes.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue 43684 is the broader, earlier request and is the better representative if one issue must be chosen, but it should not subsume 44405.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43684", + "right": "issue:44405", + "accept": false, + "reason": "Different underlying changes: Qwen3-Omni model registration for generic/conditional-generation auto classes vs Qwen3.5 sequence-classification support. They do not look like the same bug or fix." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 9, + "component_count": 12, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43835-4", + "nodes": [ + "issue:43835", + "issue:43845", + "issue:43846", + "issue:43859" + ], + "soft_pairs": [ + "issue:43835|issue:43845", + "issue:43835|issue:43846", + "issue:43835|issue:43859", + "issue:43845|issue:43846", + "issue:43845|issue:43859", + "issue:43846|issue:43859" + ], + "prepared_review_unit_hash": "8390bab7b5cb8dce444eb5039eabd011a3dad730616e1e9b3adc1201189ac0ee", + "component_budget": { + "node_count": 4, + "item_count": 4, + "soft_pair_count": 6, + "serialized_chars": 4264, + "estimated_input_tokens": 1066, + "estimated_eval_tokens": 2388 + }, + "budget": { + "node_count": 4, + "item_count": 4, + 
"soft_pair_count": 6, + "serialized_chars": 4264, + "estimated_input_tokens": 1066, + "estimated_eval_tokens": 2388 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "The cluster contains one unrelated issue (\u201cFett\u201d) and three very weakly specified issues with the same generic title (\u201chuggingface \u201d). There isn\u2019t enough evidence to merge any pair as confirmed duplicates, but 43845 is the best representative among the similar huggingface issues because it is the earliest one.", + "confidence": 0.58, + "canonical_issue_reason": "Issue 43845 is the earliest of the nearly identical \u2018huggingface \u2019 reports, so it is the best canonical representative within that subgroup.", + "canonical_pr_reason": null, + "best_issue_reason": "43845 is the strongest issue to keep as the cluster representative: earliest timestamp, same generic title as the other huggingface issues, and no evidence that 43835 is related.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:43835", + "right": "issue:43845", + "accept": false, + "reason": "Only weak title similarity; \u2018Fett\u2019 and \u2018huggingface \u2019 do not indicate the same underlying bug, and there are no shared targets/filenames or body clues." + }, + { + "left": "issue:43835", + "right": "issue:43846", + "accept": false, + "reason": "Only weak title similarity; \u2018Fett\u2019 is unrelated to the generic huggingface reports, with no supporting evidence of a shared bug." + }, + { + "left": "issue:43835", + "right": "issue:43859", + "accept": false, + "reason": "Only weak title similarity; there is no concrete evidence connecting \u2018Fett\u2019 to the huggingface issue reports." 
+ }, + { + "left": "issue:43845", + "right": "issue:43846", + "accept": false, + "reason": "Same generic title, but the bodies are too sparse to verify the same underlying bug; title similarity alone is not enough." + }, + { + "left": "issue:43845", + "right": "issue:43859", + "accept": false, + "reason": "Both are titled \u2018huggingface \u2019, but there is no concrete description, target, or code-path evidence to confirm they are duplicates." + }, + { + "left": "issue:43846", + "right": "issue:43859", + "accept": false, + "reason": "Both are titled \u2018huggingface \u2019, but the reports are too underspecified to safely merge as the same issue." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 10, + "component_count": 12, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-44371-5", + "nodes": [ + "issue:44371", + "issue:44418", + "issue:44572", + "issue:44573", + "issue:44937" + ], + "soft_pairs": [ + "issue:44371|issue:44572", + "issue:44371|issue:44573", + "issue:44371|issue:44418", + "issue:44371|issue:44937", + "issue:44418|issue:44572", + "issue:44418|issue:44573", + "issue:44572|issue:44573", + "issue:44572|issue:44937", + "issue:44573|issue:44937" + ], + "prepared_review_unit_hash": "f2844d3d941e9f9175ea562da243680407ea1db7a03b11511901e534fe09f3a8", + "component_budget": { + "node_count": 5, + "item_count": 5, + "soft_pair_count": 9, + "serialized_chars": 5794, + "estimated_input_tokens": 1449, + "estimated_eval_tokens": 3154 + }, + "budget": { + "node_count": 5, + "item_count": 5, + "soft_pair_count": 9, + "serialized_chars": 5794, + "estimated_input_tokens": 1449, + "estimated_eval_tokens": 3154 + }, + "overflow_policy": 
"truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "These are low-signal, likely unrelated placeholder/spam issues. The only clear similarity is identical spam-like titles on a few issues, but there is no substantive bug/change evidence to justify deduping.", + "confidence": 0.86, + "canonical_issue_reason": "issue:44418 is the most descriptive of the set; the others are either '' placeholders or similarly low-information titles, so it is the best available anchor if one must be chosen.", + "canonical_pr_reason": null, + "best_issue_reason": "issue:44418 is the least ambiguous and most human-readable issue in the cluster, but the cluster is too noisy to strongly recommend any true canonical issue.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:44371", + "right": "issue:44572", + "accept": false, + "reason": "Both are titled '', but there is no body/content evidence that they report the same underlying issue or change; treat as separate spam submissions." + }, + { + "left": "issue:44371", + "right": "issue:44573", + "accept": false, + "reason": "Identical spam-like titles are not enough to establish a duplicate; no substantive problem statement is available." + }, + { + "left": "issue:44371", + "right": "issue:44418", + "accept": false, + "reason": "One is a spam placeholder and the other is a generic documentation suggestion; they do not appear to be the same underlying issue." + }, + { + "left": "issue:44371", + "right": "issue:44937", + "accept": false, + "reason": "The titles are unrelated ('' vs 'Check out \"Google LLC\"'); no shared bug/change is evident." + }, + { + "left": "issue:44418", + "right": "issue:44572", + "accept": false, + "reason": "Generic documentation suggestion versus spam placeholder; no evidence they describe the same request or defect." 
+ }, + { + "left": "issue:44418", + "right": "issue:44573", + "accept": false, + "reason": "Generic documentation suggestion versus spam placeholder; not the same underlying issue." + }, + { + "left": "issue:44572", + "right": "issue:44573", + "accept": false, + "reason": "Both are titled '', but there is no actual bug/change description to confirm they are duplicates rather than separate spam reports." + }, + { + "left": "issue:44572", + "right": "issue:44937", + "accept": false, + "reason": "Spam placeholder and an unrelated promotional-looking title; no basis for deduplication." + }, + { + "left": "issue:44573", + "right": "issue:44937", + "accept": false, + "reason": "Spam placeholder and an unrelated promotional-looking title; no evidence of a shared underlying issue." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 11, + "component_count": 12, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-45145-2", + "nodes": [ + "issue:45145", + "issue:45151" + ], + "soft_pairs": [ + "issue:45145|issue:45151" + ], + "prepared_review_unit_hash": "99f53a8ce71f8a07655e27a5a4bc59969834dcdb7b195ac52d68f35a50765792", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 1646, + "estimated_input_tokens": 412, + "estimated_eval_tokens": 1080 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 1646, + "estimated_input_tokens": 412, + "estimated_eval_tokens": 1080 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "These two closed issues appear to 
be duplicates of the same report: identical title, very similar body length, and no differentiating targets or filenames.", + "confidence": 0.99, + "canonical_issue_reason": "Issue #45145 is the better canonical choice because it is earlier and already has an inbound reference, while #45151 appears to be a near-identical duplicate.", + "canonical_pr_reason": null, + "best_issue_reason": "Issue #45145 is the strongest representative of the cluster since it is the earliest submission and has the most linkage activity among the two.", + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "issue:45145", + "right": "issue:45151", + "accept": true, + "reason": "Same title and highly similar content; no evidence of distinct underlying bug or change." + } + ] + }, + "evaluator_result": { + "accept": true, + "feedback": "The assessment is grounded in the packet: both issues have identical titles, very similar body lengths, no filenames or explicit targets, and issue 45145 is earlier with the only inbound reference. The duplicate/near-duplicate conclusion is phrased cautiously enough, and the soft-edge verdict is consistent with the available similarity signal." 
+ }, + "evaluator_used": true, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "issue", + "component_index": 12, + "component_count": 12, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-45295-2", + "nodes": [ + "issue:45295", + "issue:45373" + ], + "soft_pairs": [], + "prepared_review_unit_hash": "33b797cab310b5d73dd7dfc83857ab5951fc3c891e63bc5d2a8ece07749a11dc", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 0, + "serialized_chars": 1284, + "estimated_input_tokens": 321, + "estimated_eval_tokens": 898 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 0, + "serialized_chars": 1284, + "estimated_input_tokens": 321, + "estimated_eval_tokens": 898 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "Both issues request the same missing Gemma 4 sequence-classification support. 
Issue #45373 is the more specific, actionable report and appears to subsume #45295.", + "confidence": 0.97, + "canonical_issue_reason": "#45373 is the clearer canonical issue: it names the exact missing class (`Gemma4ForSequenceClassification`), notes the module gap, and relates it directly to the existing Gemma 2/3 implementation.", + "canonical_pr_reason": null, + "best_issue_reason": "#45373 is the best single issue to track because it is more specific, easier to act on, and fully covers the broader request in #45295.", + "best_pr_reason": null, + "soft_edge_verdicts": [] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 1, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-39895-12", + "nodes": [ + "pull_request:39895", + "pull_request:42668", + "pull_request:43251", + "pull_request:43254", + "pull_request:43424", + "pull_request:43823", + "pull_request:43995", + "pull_request:44827", + "pull_request:45078", + "pull_request:45170", + "pull_request:45213", + "pull_request:45433" + ], + "soft_pairs": [ + "pull_request:39895|pull_request:43424", + "pull_request:42668|pull_request:43823", + "pull_request:42668|pull_request:45078", + "pull_request:43251|pull_request:43823", + "pull_request:43251|pull_request:44827", + "pull_request:43424|pull_request:44827", + "pull_request:43424|pull_request:43823", + "pull_request:43823|pull_request:45078", + "pull_request:43823|pull_request:43995", + "pull_request:43823|pull_request:44827", + "pull_request:43995|pull_request:45078", + "pull_request:43995|pull_request:45213", + "pull_request:43995|pull_request:44827", + "pull_request:44827|pull_request:45433", + 
"pull_request:44827|pull_request:45170", + "pull_request:44827|pull_request:45078", + "pull_request:45078|pull_request:45213", + "pull_request:44827|pull_request:45213", + "pull_request:43823|pull_request:45213", + "pull_request:43251|pull_request:43254" + ], + "prepared_review_unit_hash": "7774b6573d0e43630631c3b9e7bed087913dcb3bd038b89e15efa3da822e860b", + "component_budget": { + "node_count": 12, + "item_count": 12, + "soft_pair_count": 20, + "serialized_chars": 33958, + "estimated_input_tokens": 8490, + "estimated_eval_tokens": 17236 + }, + "budget": { + "node_count": 12, + "item_count": 12, + "soft_pair_count": 20, + "serialized_chars": 33958, + "estimated_input_tokens": 8490, + "estimated_eval_tokens": 17236 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "Only one soft pair looks like a true duplicate: the two `fixed_cross_entropy` PRs both implement the same kwargs passthrough to `nn.functional.cross_entropy`. All other pairs are broader-area similarities rather than the same concrete bug/change.", + "confidence": 0.95, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR 43251 is the better canonical duplicate for the cross-entropy kwargs fix: it targets the same function, explicitly links the issue, and has the more complete review history. PR 43254 is a near-identical alternate patch.", + "best_issue_reason": null, + "best_pr_reason": "PR 43251 is the best overall PR in the duplicate pair because it is the clearer, more mature version of the same fix and should subsume 43254.", + "soft_edge_verdicts": [ + { + "left": "pull_request:39895", + "right": "pull_request:43424", + "accept": false, + "reason": "Unrelated changes: new VideoPrism model/docs vs an ExecuTorch dynamic-shapes test." 
+ }, + { + "left": "pull_request:42668", + "right": "pull_request:43823", + "accept": false, + "reason": "Different workstreams: processor-from-pretrained robustness vs adding MobileLLM model support." + }, + { + "left": "pull_request:42668", + "right": "pull_request:45078", + "accept": false, + "reason": "Both touch auto-processing/tokenization, but one is a processor-loading robustness change and the other is tokenizer conversion/error handling." + }, + { + "left": "pull_request:43251", + "right": "pull_request:43823", + "accept": false, + "reason": "Loss utility kwargs fix and a new model addition are not the same underlying change." + }, + { + "left": "pull_request:43251", + "right": "pull_request:44827", + "accept": false, + "reason": "Cross-entropy helper fix vs Mistral4 test/integration work; different concrete problems." + }, + { + "left": "pull_request:43424", + "right": "pull_request:44827", + "accept": false, + "reason": "ExecuTorch export test and Mistral4 test fixes are unrelated." + }, + { + "left": "pull_request:43424", + "right": "pull_request:43823", + "accept": false, + "reason": "Dynamic-shape export test vs MobileLLM implementation; not the same bug/change." + }, + { + "left": "pull_request:43823", + "right": "pull_request:45078", + "accept": false, + "reason": "MobileLLM model scaffolding and tokenizer auto behavior are separate changes." + }, + { + "left": "pull_request:43823", + "right": "pull_request:43995", + "accept": false, + "reason": "New model addition vs Falcon output-interface refactor; different code paths and goals." + }, + { + "left": "pull_request:43823", + "right": "pull_request:44827", + "accept": false, + "reason": "MobileLLM addition and Mistral4 test fixes are unrelated." + }, + { + "left": "pull_request:43823", + "right": "pull_request:45213", + "accept": false, + "reason": "New model support vs a model-creation skill/doc PR; not the same underlying change." 
+ }, + { + "left": "pull_request:43995", + "right": "pull_request:44827", + "accept": false, + "reason": "Falcon refactor and Mistral4 test fixes do not address the same bug." + }, + { + "left": "pull_request:43995", + "right": "pull_request:45078", + "accept": false, + "reason": "Falcon model refactoring and tokenizer auto conversion logic are unrelated." + }, + { + "left": "pull_request:43995", + "right": "pull_request:45213", + "accept": false, + "reason": "Model refactor vs meta skill/doc PR; no shared concrete code fix." + }, + { + "left": "pull_request:44827", + "right": "pull_request:45433", + "accept": false, + "reason": "Mistral4 test fixes and SonicMoe kernel integration are separate changes in different subsystems." + }, + { + "left": "pull_request:44827", + "right": "pull_request:45170", + "accept": false, + "reason": "Mistral4 test fixes vs `layrnorm`->`layernorm` renaming/cross-model conversion cleanup." + }, + { + "left": "pull_request:44827", + "right": "pull_request:45078", + "accept": false, + "reason": "Different bugs: Mistral4 test cleanup vs tokenizer auto selection/error handling." + }, + { + "left": "pull_request:45078", + "right": "pull_request:45213", + "accept": false, + "reason": "Tokenizer auto behavior fix and a model-creation skill are unrelated." + }, + { + "left": "pull_request:44827", + "right": "pull_request:45213", + "accept": false, + "reason": "Mistral4 test fixes vs meta skill/doc PR; not mergeable as one change." + }, + { + "left": "pull_request:43251", + "right": "pull_request:43254", + "accept": true, + "reason": "Both PRs patch the same `fixed_cross_entropy` call to pass supported kwargs (`weight` and `label_smoothing`) into `cross_entropy`." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 2, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-40546-2", + "nodes": [ + "pull_request:40546", + "pull_request:42781" + ], + "soft_pairs": [ + "pull_request:40546|pull_request:42781" + ], + "prepared_review_unit_hash": "55dff05834894697df0e314e22de639398601899ed97b9e1e16b8a886e9df1a7", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 7226, + "estimated_input_tokens": 1807, + "estimated_eval_tokens": 3870 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 7226, + "estimated_input_tokens": 1807, + "estimated_eval_tokens": 3870 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "Two related VibeVoice pull requests overlap heavily in model infrastructure, but one is the base integration and the other is a separate realtime extension. 
They should not be deduplicated.", + "confidence": 0.89, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR #40546 is the better canonical PR because it is the core, non-draft VibeVoice implementation with substantial review activity and broader package/auto-doc integration.", + "best_issue_reason": null, + "best_pr_reason": "PR #40546 is the best representative of the cluster since it establishes the main VibeVoice model support; #42781 is a later draft for realtime/acoustic-tokenizer additions rather than the same change.", + "soft_edge_verdicts": [ + { + "left": "pull_request:40546", + "right": "pull_request:42781", + "accept": false, + "reason": "Both concern VibeVoice, but #40546 implements the base model while #42781 adds realtime support and an acoustic tokenizer. They share subsystem files because #42781 builds on the earlier work, not because they are the same concrete change, so this should not be merged as a duplicate." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 3, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-40755-2", + "nodes": [ + "pull_request:40755", + "pull_request:41224" + ], + "soft_pairs": [ + "pull_request:40755|pull_request:41224" + ], + "prepared_review_unit_hash": "6e275ebc8b77f989d023603b90b6bad827877771da0d14f54d077bbedfee95e7", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4675, + "estimated_input_tokens": 1169, + "estimated_eval_tokens": 2594 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4675, + "estimated_input_tokens": 1169, + 
"estimated_eval_tokens": 2594 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "These two PRs are unrelated feature additions: one adds TimesFM covariate forecasting support, the other adds DINOv3 image classification support. They share only a broad Transformers context, not the same underlying bug or change.", + "confidence": 0.98, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": "PR #41224 is the more concrete and established change, with substantial review activity and a focused model-support addition; #40755 is a separate, unrelated feature PR.", + "soft_edge_verdicts": [ + { + "left": "pull_request:40755", + "right": "pull_request:41224", + "accept": false, + "reason": "Different models, different codepaths, and different feature goals (TimesFM covariates vs DINOv3 image classification); not the same underlying change." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 4, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "cache", + "cache_hit": true, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-41356-2", + "nodes": [ + "pull_request:41356", + "pull_request:44339" + ], + "soft_pairs": [ + "pull_request:41356|pull_request:44339" + ], + "prepared_review_unit_hash": "a82632330ca4f7de455cf3edfe32fa6b4e6e9868a6b84f168bb40df55432c4f1", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 5934, + "estimated_input_tokens": 1484, + "estimated_eval_tokens": 3224 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 5934, + "estimated_input_tokens": 1484, + "estimated_eval_tokens": 3224 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "Both PRs target the same DEIMv2 tracker issue, but they are not duplicates: #41356 is an initial model/image-processor landing, while #44339 is a much broader follow-up adding loss, auto-class wiring, conversion, and modularization. The later PR is the better representative of the full feature, but the pair should not be merged as the same change.", + "confidence": 0.96, + "canonical_issue_reason": null, + "canonical_pr_reason": "#44339 is the most complete DEIMv2 implementation in the cluster: it includes the model plus auto mappings, loss utilities, conversion script, modular code, and updated docs/tests. 
#41356 is a smaller earlier subset.", + "best_issue_reason": null, + "best_pr_reason": "#44339 is the best single PR to keep as canonical because it covers the full concrete code-path change, not just the initial scaffold.", + "soft_edge_verdicts": [ + { + "left": "pull_request:41356", + "right": "pull_request:44339", + "accept": false, + "reason": "Related but not the same underlying change. #41356 adds the basic DEIMv2 model/image processor and tests, while #44339 substantially extends the feature with loss code, auto integration, conversion tooling, modular implementation, and config updates. They could not plausibly be merged into one PR without losing the distinction between an initial landing and a later expansion." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 5, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-41797-2", + "nodes": [ + "pull_request:41797", + "pull_request:45186" + ], + "soft_pairs": [ + "pull_request:41797|pull_request:45186" + ], + "prepared_review_unit_hash": "9944ed602e915a1efc520372e61f09dcc2f77887105d5401b8b3916b27ce0ac8", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 6722, + "estimated_input_tokens": 1681, + "estimated_eval_tokens": 3618 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 6722, + "estimated_input_tokens": 1681, + "estimated_eval_tokens": 3618 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "The two pull requests are separate model additions 
for different architectures (DeepSeek OCR vs Isaac). They share repository-wide model registration files, but the concrete code, docs, and tests are unrelated, so this is not a duplicate pair.", + "confidence": 0.97, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "pull_request:41797", + "right": "pull_request:45186", + "accept": false, + "reason": "Different model integrations with distinct filenames, docs, and test suites; shared auto-registration/repo plumbing is generic and does not indicate the same underlying bug or change." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 6, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-42166-2", + "nodes": [ + "pull_request:42166", + "pull_request:45426" + ], + "soft_pairs": [ + "pull_request:42166|pull_request:45426" + ], + "prepared_review_unit_hash": "efa01757b4c470a89d5647a8e39b4e74139b718f2ee311b85c69053fcde5fec0", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 5407, + "estimated_input_tokens": 1352, + "estimated_eval_tokens": 2960 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 5407, + "estimated_input_tokens": 1352, + "estimated_eval_tokens": 2960 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "The two pull requests are unrelated feature additions: one adds InternVLFlash, the other adds AXK1. 
Their overlap is limited to shared registration/auto-generated model plumbing, which is not enough to treat them as the same change.", + "confidence": 0.98, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "pull_request:42166", + "right": "pull_request:45426", + "accept": false, + "reason": "Different models and different feature sets; the shared files are only generic registry/auto-config updates, not evidence of the same underlying code change or bug fix." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 7, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-42227-5", + "nodes": [ + "pull_request:42227", + "pull_request:43291", + "pull_request:43665", + "pull_request:44815", + "pull_request:45403" + ], + "soft_pairs": [ + "pull_request:42227|pull_request:43291", + "pull_request:42227|pull_request:43665", + "pull_request:42227|pull_request:44815", + "pull_request:43291|pull_request:45403", + "pull_request:43291|pull_request:43665", + "pull_request:43291|pull_request:44815", + "pull_request:43665|pull_request:45403", + "pull_request:43665|pull_request:44815" + ], + "prepared_review_unit_hash": "21e58977b4d391a3e8b3f32cbca1c0943bc81bce95360e69edbd21365f34d5b9", + "component_budget": { + "node_count": 5, + "item_count": 5, + "soft_pair_count": 8, + "serialized_chars": 12764, + "estimated_input_tokens": 3191, + "estimated_eval_tokens": 6638 + }, + "budget": { + "node_count": 5, + "item_count": 5, + "soft_pair_count": 8, + "serialized_chars": 12764, + "estimated_input_tokens": 3191, + 
"estimated_eval_tokens": 6638 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "The cluster is mostly unrelated PRs that all happen to be weakly similar by title or broad model/test area. No pair looks like the same underlying change with enough specificity to merge as duplicates.", + "confidence": 0.94, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR #42227 is the cleanest representative: it has an explicit issue link, a focused Whisper pipeline bug fix, and a small targeted diff with matching tests.", + "best_issue_reason": null, + "best_pr_reason": "PR #42227 is the best overall PR candidate because it is the most concrete, least noisy, and most clearly tied to a specific user-facing bug.", + "soft_edge_verdicts": [ + { + "left": "pull_request:42227", + "right": "pull_request:43291", + "accept": false, + "reason": "Both touch Whisper, but they fix different paths: return-language forwarding in the ASR pipeline versus decode/timestamp handling in the tokenizer. Not the same bug." + }, + { + "left": "pull_request:42227", + "right": "pull_request:43665", + "accept": false, + "reason": "One is a Whisper ASR bug fix; the other is unrelated test-file edits for CLIP/ViT. No shared concrete problem." + }, + { + "left": "pull_request:42227", + "right": "pull_request:44815", + "accept": false, + "reason": "Whisper return-language plumbing and FP8/dequant loading changes are unrelated subsystems and bugs." + }, + { + "left": "pull_request:43291", + "right": "pull_request:45403", + "accept": false, + "reason": "Both involve tests, but one is Whisper tokenizer/decode behavior and the other is a test helper refactor for common model inference checks. Different underlying issues." 
+ }, + { + "left": "pull_request:43291", + "right": "pull_request:43665", + "accept": false, + "reason": "Whisper decode logic and CLIP/ViT test edits do not address the same code-path problem." + }, + { + "left": "pull_request:43291", + "right": "pull_request:44815", + "accept": false, + "reason": "Tokenizer/decoder changes for Whisper are unrelated to FP8 quantization/dequant fixes." + }, + { + "left": "pull_request:43665", + "right": "pull_request:45403", + "accept": false, + "reason": "Both are test-oriented, but they target different models and different failures; not the same change." + }, + { + "left": "pull_request:43665", + "right": "pull_request:44815", + "accept": false, + "reason": "A bogus-looking test-only CLIP/ViT edit is not the same as the Mistral4 FP8 dequant fix." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 8, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-42975-2", + "nodes": [ + "pull_request:42975", + "pull_request:42976" + ], + "soft_pairs": [ + "pull_request:42975|pull_request:42976" + ], + "prepared_review_unit_hash": "3d27c23169e4a7b8f787ef193f6037fa8c646d6c33eb35445b4fcd1781675f0b", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 5933, + "estimated_input_tokens": 1484, + "estimated_eval_tokens": 3224 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 5933, + "estimated_input_tokens": 1484, + "estimated_eval_tokens": 3224 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + 
"summary": "Both items are maintenance PRs updating GitHub Actions workflow dependencies, but they target different action families and different upgrade goals. They overlap in affected workflow files, yet they do not look like the same concrete change set.", + "confidence": 0.89, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR 42976 is the broader umbrella change: it upgrades multiple GitHub Actions to newer versions across several workflows, and its title is the most general fit for a cluster representative.", + "best_issue_reason": null, + "best_pr_reason": "PR 42976 is the best single representative because it covers the wider maintenance update and has the more general scope/title, whereas PR 42975 is centered on Node 24 compatibility for a narrower set of actions.", + "soft_edge_verdicts": [ + { + "left": "pull_request:42975", + "right": "pull_request:42976", + "accept": false, + "reason": "Do not merge: 42975 updates checkout/cache/upload-artifact for Node 24 compatibility, while 42976 updates docker/build-push-action and other actions to latest versions. They touch similar workflow files, but they are different action migrations with different concrete purposes and could be merged separately." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 9, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-42978-3", + "nodes": [ + "pull_request:42978", + "pull_request:43448", + "pull_request:43451" + ], + "soft_pairs": [ + "pull_request:42978|pull_request:43448", + "pull_request:42978|pull_request:43451", + "pull_request:43448|pull_request:43451" + ], + "prepared_review_unit_hash": "2b204e08a71e099374e684e2679cdd3a2ce6b0c51219c9ca75e2b9460e7965e7", + "component_budget": { + "node_count": 3, + "item_count": 3, + "soft_pair_count": 3, + "serialized_chars": 10145, + "estimated_input_tokens": 2537, + "estimated_eval_tokens": 5330 + }, + "budget": { + "node_count": 3, + "item_count": 3, + "soft_pair_count": 3, + "serialized_chars": 10145, + "estimated_input_tokens": 2537, + "estimated_eval_tokens": 5330 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "All three pull requests are separate model additions, not duplicates: ViT NEPA, Molmo, and Molmo2 each introduce different model families and distinct codepaths. 
The only overlap is expected boilerplate around docs and auto-registration.", + "confidence": 0.98, + "canonical_issue_reason": null, + "canonical_pr_reason": "No canonical PR for deduping here; these PRs are about different models and are not mergeable into a single fix/change.", + "best_issue_reason": null, + "best_pr_reason": "If one must be chosen as a representative, PR #42978 is the most self-contained due to its explicit linked issue and focused single-model scope, but it is not a duplicate of the others.", + "soft_edge_verdicts": [ + { + "left": "pull_request:42978", + "right": "pull_request:43448", + "accept": false, + "reason": "ViT NEPA vs Molmo are different models with different implementation files and docs; shared auto-registration/docs boilerplate is not enough to treat them as the same change." + }, + { + "left": "pull_request:42978", + "right": "pull_request:43451", + "accept": false, + "reason": "ViT NEPA and Molmo2 are unrelated model additions. They touch some of the same Transformers plumbing, but they fix/add different concrete models and codepaths." + }, + { + "left": "pull_request:43448", + "right": "pull_request:43451", + "accept": false, + "reason": "Molmo and Molmo2 are adjacent names but still distinct model families with separate source trees and documentation; this is not one underlying bug or mergeable change." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 10, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43488-2", + "nodes": [ + "pull_request:43488", + "pull_request:43532" + ], + "soft_pairs": [ + "pull_request:43488|pull_request:43532" + ], + "prepared_review_unit_hash": "bdff12eccb9772a9094ed9512a55ae0d0c72925841eea798f77ba7cd761e80e7", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4563, + "estimated_input_tokens": 1141, + "estimated_eval_tokens": 2538 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4563, + "estimated_input_tokens": 1141, + "estimated_eval_tokens": 2538 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "Two open PRs, but they address different areas: one is a repo-bot/version/dependency test change, the other is a workflow/notification diff-display change. 
The only overlap is a shared ViT test file, which looks incidental rather than the same underlying fix.", + "confidence": 0.98, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR 43532 is the better representative because it has broader review activity and touches the main workflow/notification path, making it more informative as the cluster exemplar.", + "best_issue_reason": null, + "best_pr_reason": "PR 43532 is the strongest standalone representative: it has more discussion/review activity and a clearer, broader code-path change than the repo-bot test PR.", + "soft_edge_verdicts": [ + { + "left": "pull_request:43488", + "right": "pull_request:43532", + "accept": false, + "reason": "Reject: these PRs do not fix the same concrete problem. 43488 is a packaging/version/dependency test change, while 43532 changes CI/workflow and notification-related code. The shared ViT test filename appears incidental and they would not plausibly merge into one PR." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 11, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43612-2", + "nodes": [ + "pull_request:43612", + "pull_request:43613" + ], + "soft_pairs": [ + "pull_request:43612|pull_request:43613" + ], + "prepared_review_unit_hash": "b3f01127834c1099f1850f70f6ae4244e52b509e62a46c5a78178d1f4327179a", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 6083, + "estimated_input_tokens": 1521, + "estimated_eval_tokens": 3298 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 6083, + "estimated_input_tokens": 
1521, + "estimated_eval_tokens": 3298 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "Two open PRs add different segmentation pipelines: one for Promptable Concept Segmentation and one for Promptable Visual Segmentation. They share documentation and registry plumbing, but they target different tasks, model sets, and code paths, so they are not duplicates.", + "confidence": 0.97, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": "Neither PR is a good canonical representative for a duplicate cluster because they implement different features rather than the same fix; if forced, PR 43613 is broader, but not a duplicate of 43612.", + "soft_edge_verdicts": [ + { + "left": "pull_request:43612", + "right": "pull_request:43613", + "accept": false, + "reason": "Reject: they add different pipelines (concept vs visual segmentation) with different task docs, model integrations, and test files; shared infra changes are not enough to treat them as the same PR." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 12, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43651-2", + "nodes": [ + "pull_request:43651", + "pull_request:43663" + ], + "soft_pairs": [ + "pull_request:43651|pull_request:43663" + ], + "prepared_review_unit_hash": "92fd629ef5ecb4476ed8d2270e8f529d649bd3b9695ae45c3e1a562d1919c775", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4291, + "estimated_input_tokens": 1073, + "estimated_eval_tokens": 2402 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4291, + "estimated_input_tokens": 1073, + "estimated_eval_tokens": 2402 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "These two PRs only overlap in `src/transformers/trainer.py` but change different trainer behaviors: one adds a hook for gradient-accumulation loss scaling, the other refactors signature-column filtering for custom trainers. They are not the same underlying change and should not be merged as duplicates.", + "confidence": 0.98, + "canonical_issue_reason": null, + "canonical_pr_reason": null, + "best_issue_reason": null, + "best_pr_reason": null, + "soft_edge_verdicts": [ + { + "left": "pull_request:43651", + "right": "pull_request:43663", + "accept": false, + "reason": "Different code paths and different goals: GA loss scaling vs. dataloader column/signature filtering. Shared file alone is not enough to treat them as the same PR." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 13, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43656-3", + "nodes": [ + "pull_request:43656", + "pull_request:43836", + "pull_request:43842" + ], + "soft_pairs": [ + "pull_request:43656|pull_request:43842", + "pull_request:43836|pull_request:43842" + ], + "prepared_review_unit_hash": "7dd67e9306aa0c9033f983eaa260c8ff8804830a5ad5179ac659ddd5b48fb771", + "component_budget": { + "node_count": 3, + "item_count": 3, + "soft_pair_count": 2, + "serialized_chars": 5992, + "estimated_input_tokens": 1498, + "estimated_eval_tokens": 3252 + }, + "budget": { + "node_count": 3, + "item_count": 3, + "soft_pair_count": 2, + "serialized_chars": 5992, + "estimated_input_tokens": 1498, + "estimated_eval_tokens": 3252 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "Two PRs appear to be alternate fixes for the same `TypeAdapter`/annotation NameError in `src/transformers/cli/serve.py`; the third PR looks like a separate typing-related cleanup in the same file rather than the same bug.", + "confidence": 0.73, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR #43842 is the cleanest, most targeted fix for the concrete runtime failure: it explicitly addresses the `TypeAdapter` NameError when `pydantic` is absent, with a minimal change set in `serve.py`.", + "best_issue_reason": null, + "best_pr_reason": "PR #43842 best represents the underlying bug because it directly names the failure mode, stays narrowly scoped to the affected code path, and is easier to 
merge as the canonical fix than the broader annotation cleanup in #43836.", + "soft_edge_verdicts": [ + { + "left": "pull_request:43656", + "right": "pull_request:43842", + "accept": false, + "reason": "Both touch `serve.py`, but #43656 looks like a broader annotation/future-annotations cleanup around `TimedModel`, not the same specific `TypeAdapter`-missing-`pydantic` NameError fixed by #43842." + }, + { + "left": "pull_request:43836", + "right": "pull_request:43842", + "accept": true, + "reason": "Both are alternate fixes for the same `TypeAdapter` name-resolution problem in `serve.py` and share the same issue target; they look like overlapping implementations of the same underlying runtime bug." + } + ] + }, + "evaluator_result": { + "accept": true, + "feedback": "The summary is grounded in the packet and stays conservative. The 43836/43842 pair does look like overlapping fixes for the same TypeAdapter NameError in `serve.py`, while 43656 appears broader and not clearly the same bugfix based on the diff preview. No overstatement stands out." 
+ }, + "evaluator_used": true, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 14, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43757-2", + "nodes": [ + "pull_request:43757", + "pull_request:45118" + ], + "soft_pairs": [ + "pull_request:43757|pull_request:45118" + ], + "prepared_review_unit_hash": "a486ed5323032f99f7840a331d1736f23b689f0faab1a7000260db763a7511db", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4608, + "estimated_input_tokens": 1152, + "estimated_eval_tokens": 2560 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4608, + "estimated_input_tokens": 1152, + "estimated_eval_tokens": 2560 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "Both PRs address GPT-OSS GGUF loading; the second is a broader follow-up that supersedes the first by moving from a fallback workaround to full support.", + "confidence": 0.88, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR #45118 is the more complete GPT-OSS GGUF fix, explicitly supersedes #43757, and adds the actual loading support rather than a stopgap fallback.", + "best_issue_reason": null, + "best_pr_reason": "#45118 is the best representative because it resolves the same user-facing GGUF loading problem with a fuller implementation, docs, and tests.", + "soft_edge_verdicts": [ + { + "left": "pull_request:43757", + "right": "pull_request:45118", + "accept": true, + "reason": "Same underlying GPT-OSS GGUF loading path in the same utility file; #45118 expands and supersedes the workaround 
in #43757, so they are close enough to treat as duplicate-related." + } + ] + }, + "evaluator_result": { + "accept": true, + "feedback": "Accept. The packet supports the core claim: both PRs target the same GPT-OSS GGUF loading problem, they share `modeling_gguf_pytorch_utils.py`, and #45118 explicitly says it supersedes #43757 while adding fuller support. The summary and soft-edge verdict stay within the evidence and do not overstate it." + }, + "evaluator_used": true, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 15, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43779-2", + "nodes": [ + "pull_request:43779", + "pull_request:43816" + ], + "soft_pairs": [ + "pull_request:43779|pull_request:43816" + ], + "prepared_review_unit_hash": "e7c674adcc031aa8883109a5dfc678b4407fb20b10764302092e384d031ba83a", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4362, + "estimated_input_tokens": 1091, + "estimated_eval_tokens": 2438 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4362, + "estimated_input_tokens": 1091, + "estimated_eval_tokens": 2438 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "Both PRs target the same SwanLab resume/id feature and the same file, but one is the implementation change and the other appears to be mostly documentation/supporting text. 
They should not be merged as duplicates for triage.", + "confidence": 0.9, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR 43779 is the stronger canonical PR because it contains the actual callback implementation to pass through id/resume kwargs, which is the core functional change.", + "best_issue_reason": null, + "best_pr_reason": "PR 43779 is the best representative of the cluster because it directly implements the missing SwanLabCallback behavior; PR 43816 looks secondary/doc-focused.", + "soft_edge_verdicts": [ + { + "left": "pull_request:43779", + "right": "pull_request:43816", + "accept": false, + "reason": "Same issue target and subsystem, but the previews show different scopes: 43779 changes callback initialization to support resume/id, while 43816 mainly adds docstring entries. Not the same concrete code-path change, so do not merge as duplicates." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 16, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43838-3", + "nodes": [ + "pull_request:43838", + "pull_request:43973", + "pull_request:45283" + ], + "soft_pairs": [ + "pull_request:43838|pull_request:45283", + "pull_request:43973|pull_request:45283" + ], + "prepared_review_unit_hash": "3d087bcadfd9802305de9f04c4781d778c5a9c0c790fb8769116f95f4e264503", + "component_budget": { + "node_count": 3, + "item_count": 3, + "soft_pair_count": 2, + "serialized_chars": 8736, + "estimated_input_tokens": 2184, + "estimated_eval_tokens": 4624 + }, + "budget": { + "node_count": 3, + "item_count": 3, + "soft_pair_count": 2, + "serialized_chars": 8736, + "estimated_input_tokens": 2184, + 
"estimated_eval_tokens": 4624 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "These are three unrelated pull requests: Qwen3-ASR support, Lfm2Audio model/docs support, and Qwen3.5 GGUF loading support. They touch different model families and different code paths, so this is not a duplicate cluster.", + "confidence": 0.98, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR 43838 is the most substantial and centralized change set, with broad model/auto/processor additions and an explicit issue target, so it is the best representative artifact in this mixed cluster.", + "best_issue_reason": null, + "best_pr_reason": "PR 43838 is the strongest standalone representative because it introduces a complete new model integration and has the clearest issue linkage; the other PRs are narrower and unrelated to it.", + "soft_edge_verdicts": [ + { + "left": "pull_request:43838", + "right": "pull_request:45283", + "accept": false, + "reason": "Different features and code paths: Qwen3-ASR model integration vs Qwen3.5 GGUF loading support. Same broad transformer ecosystem, but not the same underlying change." + }, + { + "left": "pull_request:43973", + "right": "pull_request:45283", + "accept": false, + "reason": "Lfm2Audio model/docs addition and Qwen3.5 GGUF loading are unrelated changes in different model families with no shared concrete bug or fix." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 17, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 4, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43996-28", + "nodes": [ + "pull_request:43996", + "pull_request:44007", + "pull_request:44013", + "pull_request:44018", + "pull_request:44019", + "pull_request:44044", + "pull_request:44066", + "pull_request:44068", + "pull_request:44071", + "pull_request:44072", + "pull_request:44085", + "pull_request:44086", + "pull_request:44116", + "pull_request:44722" + ], + "soft_pairs": [ + "pull_request:43996|pull_request:44085", + "pull_request:43996|pull_request:44044", + "pull_request:44066|pull_request:44085", + "pull_request:44007|pull_request:44072", + "pull_request:44072|pull_request:44722", + "pull_request:44066|pull_request:44072", + "pull_request:44013|pull_request:44044", + "pull_request:44066|pull_request:44086", + "pull_request:44066|pull_request:44071", + "pull_request:44018|pull_request:44068", + "pull_request:44066|pull_request:44068", + "pull_request:44018|pull_request:44066", + "pull_request:44019|pull_request:44722", + "pull_request:44086|pull_request:44722", + "pull_request:44019|pull_request:44085", + "pull_request:44018|pull_request:44019", + "pull_request:44019|pull_request:44071", + "pull_request:44068|pull_request:44116", + "pull_request:44068|pull_request:44722", + "pull_request:44018|pull_request:44116", + "pull_request:44071|pull_request:44722", + "pull_request:44018|pull_request:44086", + "pull_request:44018|pull_request:44085", + "pull_request:44071|pull_request:44085" + ], + "prepared_review_unit_hash": "4983703ddec7656295b939b42586d97440d7fca4469d8dae33518e2818429038", + 
"component_budget": { + "node_count": 28, + "item_count": 28, + "soft_pair_count": 77, + "serialized_chars": 84502, + "estimated_input_tokens": 21126, + "estimated_eval_tokens": 42508 + }, + "budget": { + "node_count": 14, + "item_count": 14, + "soft_pair_count": 24, + "serialized_chars": 34773, + "estimated_input_tokens": 8694, + "estimated_eval_tokens": 17644 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly a theme cluster of output-tracing refactors across many different model implementations, not one duplicate bug. The only likely duplicate-style edges are within the GPT-Neo and GPT-J subgroups; the rest are same initiative but different code paths/models.", + "confidence": 0.88, + "canonical_issue_reason": null, + "canonical_pr_reason": "44722 is the most complete/latest representative of the overlapping GPT-J output-tracing refactor work, with the GPT-J block change plus the copied CodeGen cleanup.", + "best_issue_reason": null, + "best_pr_reason": "44722 is the best representative PR in the duplicate-like subset because it is the broadest and most complete GPT-J output-tracing refactor in this set.", + "soft_edge_verdicts": [ + { + "left": "pull_request:43996", + "right": "pull_request:44085", + "accept": false, + "reason": "Different model codepaths (CVT/FNet vs GPT-J); same tracking theme only, not the same change." + }, + { + "left": "pull_request:43996", + "right": "pull_request:44044", + "accept": false, + "reason": "Different models and implementations; both are output-tracing refactors, but not the same underlying bug/change." + }, + { + "left": "pull_request:44066", + "right": "pull_request:44085", + "accept": true, + "reason": "Both touch GPT-J output tracing in modeling_gptj.py and appear to refactor the same concrete code path." 
+ }, + { + "left": "pull_request:44007", + "right": "pull_request:44072", + "accept": false, + "reason": "ResNet and EfficientNet are separate model implementations; same refactor pattern, not a duplicate change." + }, + { + "left": "pull_request:44072", + "right": "pull_request:44722", + "accept": false, + "reason": "EfficientNet vs GPT-J: unrelated model codepaths despite similar decorator-based refactor intent." + }, + { + "left": "pull_request:44066", + "right": "pull_request:44072", + "accept": false, + "reason": "GPT-J vs EfficientNet are different concrete implementations, so this is not the same underlying fix." + }, + { + "left": "pull_request:44013", + "right": "pull_request:44044", + "accept": false, + "reason": "MobileNetV2 and DeBERTa v2 are unrelated codepaths; only the refactor style matches." + }, + { + "left": "pull_request:44066", + "right": "pull_request:44086", + "accept": false, + "reason": "GPT-J output tracing vs MGP-STR output tracing are different model changes, not one mergeable duplicate." + }, + { + "left": "pull_request:44066", + "right": "pull_request:44071", + "accept": false, + "reason": "GPT-J and MPT are different models; same refactor family but not the same code-path problem." + }, + { + "left": "pull_request:44018", + "right": "pull_request:44068", + "accept": true, + "reason": "Both are GPT-Neo output-tracing refactors on the same file/code path and look like alternate versions of the same change." + }, + { + "left": "pull_request:44066", + "right": "pull_request:44068", + "accept": false, + "reason": "GPT-J and GPT-Neo are different model implementations, so these are not duplicates." + }, + { + "left": "pull_request:44018", + "right": "pull_request:44066", + "accept": false, + "reason": "Different models (GPT-Neo vs GPT-J); similar refactor theme only." 
+ }, + { + "left": "pull_request:44019", + "right": "pull_request:44722", + "accept": false, + "reason": "ResNet and GPT-J are unrelated model implementations and do not fix the same code path." + }, + { + "left": "pull_request:44086", + "right": "pull_request:44722", + "accept": false, + "reason": "MGP-STR vs GPT-J are different model codepaths; not the same underlying change." + }, + { + "left": "pull_request:44019", + "right": "pull_request:44085", + "accept": false, + "reason": "ResNet vs GPT-J are different model implementations; shared output-tracing motif is too broad." + }, + { + "left": "pull_request:44018", + "right": "pull_request:44019", + "accept": false, + "reason": "GPT-Neo and ResNet are separate model-specific refactors, not one duplicate fix." + }, + { + "left": "pull_request:44019", + "right": "pull_request:44071", + "accept": false, + "reason": "ResNet vs MPT are unrelated concrete implementations." + }, + { + "left": "pull_request:44068", + "right": "pull_request:44116", + "accept": false, + "reason": "GPT-Neo and Flaubert touch different model codepaths; same decorator migration theme only." + }, + { + "left": "pull_request:44068", + "right": "pull_request:44722", + "accept": false, + "reason": "GPT-Neo vs GPT-J are different model-specific changes, so not duplicates." + }, + { + "left": "pull_request:44018", + "right": "pull_request:44116", + "accept": false, + "reason": "GPT-Neo and Flaubert are separate model implementations and not the same bug/change." + }, + { + "left": "pull_request:44071", + "right": "pull_request:44085", + "accept": false, + "reason": "MPT and GPT-J are different codepaths; no evidence they are the same underlying change." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": true, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 17, + "component_count": 24, + "review_unit_index": 2, + "review_unit_count": 4, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43996-28", + "nodes": [ + "pull_request:43997", + "pull_request:44017", + "pull_request:44018", + "pull_request:44056", + "pull_request:44059", + "pull_request:44068", + "pull_request:44071", + "pull_request:44073", + "pull_request:44074", + "pull_request:44076", + "pull_request:44085", + "pull_request:44086", + "pull_request:44098", + "pull_request:44101", + "pull_request:44161", + "pull_request:44722" + ], + "soft_pairs": [ + "pull_request:44068|pull_request:44086", + "pull_request:44059|pull_request:44086", + "pull_request:44059|pull_request:44722", + "pull_request:44085|pull_request:44086", + "pull_request:44098|pull_request:44101", + "pull_request:44018|pull_request:44071", + "pull_request:44017|pull_request:44722", + "pull_request:44017|pull_request:44161", + "pull_request:44017|pull_request:44073", + "pull_request:44068|pull_request:44071", + "pull_request:44071|pull_request:44086", + "pull_request:44017|pull_request:44071", + "pull_request:44059|pull_request:44071", + "pull_request:43997|pull_request:44073", + "pull_request:44073|pull_request:44101", + "pull_request:44059|pull_request:44068", + "pull_request:44018|pull_request:44059", + "pull_request:44056|pull_request:44101", + "pull_request:44073|pull_request:44161", + "pull_request:44101|pull_request:44161", + "pull_request:44017|pull_request:44018", + "pull_request:43997|pull_request:44056", + "pull_request:44071|pull_request:44076", + "pull_request:44074|pull_request:44101" + ], + "prepared_review_unit_hash": 
"c693dca202400388b55146b08377a974a429a33170da06e8b7ec21e50e015ba7", + "component_budget": { + "node_count": 28, + "item_count": 28, + "soft_pair_count": 77, + "serialized_chars": 84502, + "estimated_input_tokens": 21126, + "estimated_eval_tokens": 42508 + }, + "budget": { + "node_count": 16, + "item_count": 16, + "soft_pair_count": 24, + "serialized_chars": 38231, + "estimated_input_tokens": 9558, + "estimated_eval_tokens": 19372 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is a set of similar but model-specific refactors to standardized output tracing/capture_outputs, not duplicates of one underlying bug or change. The PRs touch different model implementations and should stay separate.", + "confidence": 0.82, + "canonical_issue_reason": null, + "canonical_pr_reason": "44161 is the clearest representative of the theme: a substantial, self-contained standardized output-tracing refactor with an explicit Fixes #43979 link and broad implementation coverage.", + "best_issue_reason": null, + "best_pr_reason": "44161 is the best single representative of this cluster because it most clearly embodies the shared refactor pattern and appears more complete than the smaller model-specific variants.", + "soft_edge_verdicts": [ + { + "left": "pull_request:44068", + "right": "pull_request:44086", + "accept": false, + "reason": "Both are output-tracing refactors, but they target different models (GPT-Neo vs MGP-STR) and different code paths." + }, + { + "left": "pull_request:44059", + "right": "pull_request:44086", + "accept": false, + "reason": "GPT-2 and MGP-STR are separate model implementations; same refactor theme, not the same change." + }, + { + "left": "pull_request:44059", + "right": "pull_request:44722", + "accept": false, + "reason": "These are model-specific refactors in different code paths (GPT-2 vs GPT-J/CodeGen), so they are not duplicates." 
+ }, + { + "left": "pull_request:44085", + "right": "pull_request:44086", + "accept": false, + "reason": "GPT-J output tracing and MGP-STR output tracing are distinct model changes, not one underlying fix." + }, + { + "left": "pull_request:44098", + "right": "pull_request:44101", + "accept": false, + "reason": "ViLT and Flaubert/XLM are unrelated model implementations; the similarity is only the standardized refactor pattern." + }, + { + "left": "pull_request:44018", + "right": "pull_request:44071", + "accept": false, + "reason": "GPT-Neo and MPT are different model code paths; both refactor output handling, but they are separate changes." + }, + { + "left": "pull_request:44017", + "right": "pull_request:44722", + "accept": false, + "reason": "SegFormer and GPT-J/CodeGen are unrelated models, so this is not the same bug or PR." + }, + { + "left": "pull_request:44017", + "right": "pull_request:44161", + "accept": false, + "reason": "SegFormer and LongT5 are separate model refactors; they share a pattern but not a concrete duplicate change." + }, + { + "left": "pull_request:44017", + "right": "pull_request:44073", + "accept": false, + "reason": "SegFormer and VisualBert touch different architectures and code paths, so they should not be merged as duplicates." + }, + { + "left": "pull_request:44068", + "right": "pull_request:44071", + "accept": false, + "reason": "Both are output tracing refactors, but GPT-Neo and MPT are different model implementations with separate edits." + }, + { + "left": "pull_request:44071", + "right": "pull_request:44086", + "accept": false, + "reason": "MPT and MGP-STR are unrelated model files and changes; similarity is only at the refactor level." + }, + { + "left": "pull_request:44017", + "right": "pull_request:44071", + "accept": false, + "reason": "SegFormer and MPT are different model code paths, so this is not a duplicate change." 
+ }, + { + "left": "pull_request:44059", + "right": "pull_request:44071", + "accept": false, + "reason": "GPT-2 and MPT are distinct model refactors; no shared concrete bug or fix." + }, + { + "left": "pull_request:43997", + "right": "pull_request:44073", + "accept": false, + "reason": "RegNet and VisualBert are different architectures; the refactor pattern is similar but the changes are not the same." + }, + { + "left": "pull_request:44073", + "right": "pull_request:44101", + "accept": false, + "reason": "VisualBert and Flaubert/XLM are separate model implementations, so they are not duplicates." + }, + { + "left": "pull_request:44059", + "right": "pull_request:44068", + "accept": false, + "reason": "GPT-2 and GPT-Neo are different model implementations; same standardized-output theme, but not one change." + }, + { + "left": "pull_request:44056", + "right": "pull_request:44101", + "accept": false, + "reason": "MPNet and Flaubert/XLM are different code paths and different refactors." + }, + { + "left": "pull_request:44073", + "right": "pull_request:44161", + "accept": false, + "reason": "VisualBert and LongT5 are unrelated model-specific refactors, not the same underlying fix." + }, + { + "left": "pull_request:44101", + "right": "pull_request:44161", + "accept": false, + "reason": "Flaubert/XLM and LongT5 are separate model changes; similarity is only the output-tracing migration." + }, + { + "left": "pull_request:44017", + "right": "pull_request:44018", + "accept": false, + "reason": "SegFormer and GPT-Neo are different model refactors, so they should not be deduplicated." + }, + { + "left": "pull_request:43997", + "right": "pull_request:44056", + "accept": false, + "reason": "RegNet and MPNet are unrelated model implementations with separate output-tracing changes." + }, + { + "left": "pull_request:44071", + "right": "pull_request:44076", + "accept": false, + "reason": "MPT and ImageGPT are different models; these are separate standardized-output refactors." 
+ }, + { + "left": "pull_request:44074", + "right": "pull_request:44101", + "accept": false, + "reason": "TextNet and Flaubert/XLM are different model code paths and therefore not duplicates." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 17, + "component_count": 24, + "review_unit_index": 3, + "review_unit_count": 4, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43996-28", + "nodes": [ + "pull_request:43996", + "pull_request:43997", + "pull_request:44007", + "pull_request:44010", + "pull_request:44017", + "pull_request:44018", + "pull_request:44044", + "pull_request:44056", + "pull_request:44066", + "pull_request:44071", + "pull_request:44073", + "pull_request:44074", + "pull_request:44085", + "pull_request:44129", + "pull_request:44154", + "pull_request:44161", + "pull_request:44722" + ], + "soft_pairs": [ + "pull_request:44056|pull_request:44161", + "pull_request:43997|pull_request:44018", + "pull_request:43997|pull_request:44161", + "pull_request:44074|pull_request:44161", + "pull_request:43997|pull_request:44071", + "pull_request:44010|pull_request:44071", + "pull_request:44010|pull_request:44073", + "pull_request:44018|pull_request:44129", + "pull_request:43997|pull_request:44074", + "pull_request:43997|pull_request:44129", + "pull_request:44010|pull_request:44018", + "pull_request:44071|pull_request:44129", + "pull_request:43996|pull_request:44161", + "pull_request:43996|pull_request:44017", + "pull_request:44010|pull_request:44044", + "pull_request:44044|pull_request:44074", + "pull_request:44066|pull_request:44722", + "pull_request:44085|pull_request:44722", + "pull_request:44085|pull_request:44129", + "pull_request:43996|pull_request:44007", + 
"pull_request:43996|pull_request:44129", + "pull_request:44044|pull_request:44129", + "pull_request:44044|pull_request:44154" + ], + "prepared_review_unit_hash": "3de935753b8f57416f7db944f0d92773c5156ad0d0e2c985750b71087be17ecf", + "component_budget": { + "node_count": 28, + "item_count": 28, + "soft_pair_count": 77, + "serialized_chars": 84502, + "estimated_input_tokens": 21126, + "estimated_eval_tokens": 42508 + }, + "budget": { + "node_count": 17, + "item_count": 17, + "soft_pair_count": 23, + "serialized_chars": 40364, + "estimated_input_tokens": 10091, + "estimated_eval_tokens": 20438 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "This cluster is mostly a set of model-specific output-tracing refactors under issue #43979, but most are separate per-model changes rather than true duplicates. The only clear duplicate-like overlap is the GPT-J/CodeGen refactor pair.", + "confidence": 0.79, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR 44066 is the clearest representative: it is focused on the GPT-J/CodeGen standardized output-tracing refactor, has the explicit shared issue target #43979, and its file set matches the concrete code path being changed.", + "best_issue_reason": null, + "best_pr_reason": "PR 44066 is the best cluster representative because it most clearly captures the shared GPT-J/CodeGen change and is better scoped than the broader, model-mixed refactor PRs.", + "soft_edge_verdicts": [ + { + "left": "pull_request:44056", + "right": "pull_request:44161", + "accept": false, + "reason": "Both are output-tracing refactors, but they affect different models and code paths (MPNet vs LongT5), so they are not the same concrete change." 
+ }, + { + "left": "pull_request:43997", + "right": "pull_request:44018", + "accept": false, + "reason": "RegNet and GPT-Neo are unrelated model implementations; same refactor theme and shared issue target are not enough to make this a duplicate." + }, + { + "left": "pull_request:43997", + "right": "pull_request:44161", + "accept": false, + "reason": "These target different models and different forward paths, so they are separate refactors rather than one mergeable change." + }, + { + "left": "pull_request:44074", + "right": "pull_request:44161", + "accept": false, + "reason": "TextNet and LongT5 touch different architectures and execution paths; they are not the same underlying bug or change." + }, + { + "left": "pull_request:43997", + "right": "pull_request:44071", + "accept": false, + "reason": "RegNet vs MPT are distinct model code paths, so this is only a thematic similarity, not a duplicate." + }, + { + "left": "pull_request:44010", + "right": "pull_request:44071", + "accept": false, + "reason": "SqueezeBERT and MPT are different models; both use output-tracing refactors, but not the same concrete fix." + }, + { + "left": "pull_request:44010", + "right": "pull_request:44073", + "accept": false, + "reason": "These are separate model-specific refactors in unrelated code paths (SqueezeBERT vs VisualBERT)." + }, + { + "left": "pull_request:44018", + "right": "pull_request:44129", + "accept": false, + "reason": "GPT-Neo and SpeechT5 are unrelated implementations; the overlap is only in the refactor pattern." + }, + { + "left": "pull_request:43997", + "right": "pull_request:44074", + "accept": false, + "reason": "RegNet and TextNet are different model families and do not appear to address the same code-path problem." + }, + { + "left": "pull_request:43997", + "right": "pull_request:44129", + "accept": false, + "reason": "RegNet and SpeechT5 are unrelated models, so these are not duplicate fixes." 
+ }, + { + "left": "pull_request:44010", + "right": "pull_request:44018", + "accept": false, + "reason": "SqueezeBERT and GPT-Neo are distinct model implementations; same refactor theme is insufficient to merge them." + }, + { + "left": "pull_request:44071", + "right": "pull_request:44129", + "accept": false, + "reason": "MPT and SpeechT5 are different model code paths, so this is not the same change." + }, + { + "left": "pull_request:43996", + "right": "pull_request:44161", + "accept": false, + "reason": "CVT/FNet and LongT5 are separate model refactors with no shared concrete code-path fix." + }, + { + "left": "pull_request:43996", + "right": "pull_request:44017", + "accept": false, + "reason": "CVT/FNet and SegFormer are different model families; the shared output-tracing theme is too broad to treat as a duplicate." + }, + { + "left": "pull_request:44010", + "right": "pull_request:44044", + "accept": false, + "reason": "SqueezeBERT and DeBERTa-v2 are different model code paths; these do not look like the same underlying bug or change." + }, + { + "left": "pull_request:44044", + "right": "pull_request:44074", + "accept": false, + "reason": "DeBERTa-v2 and TextNet are unrelated model implementations, so they are not duplicates." + }, + { + "left": "pull_request:44066", + "right": "pull_request:44722", + "accept": true, + "reason": "Both PRs touch the same GPT-J/CodeGen files, target the same issue #43979, and implement the same standardized output-tracing refactor." + }, + { + "left": "pull_request:44085", + "right": "pull_request:44722", + "accept": true, + "reason": "Despite the odd title, both PRs are centered on GPT-J output tracing in the same file and are aiming at the same standardized decorator-based refactor." + }, + { + "left": "pull_request:44085", + "right": "pull_request:44129", + "accept": false, + "reason": "GPT-J and SpeechT5 are different models, so they are not the same concrete fix." 
+ }, + { + "left": "pull_request:43996", + "right": "pull_request:44007", + "accept": false, + "reason": "These are separate model refactors (CVT/FNet vs ResNet/RT-DETR ResNet), not one underlying code-path change." + }, + { + "left": "pull_request:43996", + "right": "pull_request:44129", + "accept": false, + "reason": "CVT/FNet and SpeechT5 are unrelated implementations, so this is not a duplicate pair." + }, + { + "left": "pull_request:44044", + "right": "pull_request:44129", + "accept": false, + "reason": "DeBERTa-v2 and SpeechT5 are unrelated model paths; only the refactor pattern overlaps." + }, + { + "left": "pull_request:44044", + "right": "pull_request:44154", + "accept": false, + "reason": "DeBERTa-v2 and Vits are distinct models and do not appear to fix the same bug or change." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 17, + "component_count": 24, + "review_unit_index": 4, + "review_unit_count": 4, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43996-28", + "nodes": [ + "pull_request:43997", + "pull_request:44010", + "pull_request:44024", + "pull_request:44056", + "pull_request:44073", + "pull_request:44074", + "pull_request:44076", + "pull_request:44129" + ], + "soft_pairs": [ + "pull_request:44024|pull_request:44076", + "pull_request:44073|pull_request:44074", + "pull_request:44056|pull_request:44073", + "pull_request:44056|pull_request:44074", + "pull_request:43997|pull_request:44010", + "pull_request:44010|pull_request:44129" + ], + "prepared_review_unit_hash": "988d264ba6c24724a9800e84e4845da09674600fe6610c86b5058d0265f51b18", + "component_budget": { + "node_count": 28, + "item_count": 28, + "soft_pair_count": 77, + "serialized_chars": 84502, + 
"estimated_input_tokens": 21126, + "estimated_eval_tokens": 42508 + }, + "budget": { + "node_count": 8, + "item_count": 8, + "soft_pair_count": 6, + "serialized_chars": 17124, + "estimated_input_tokens": 4281, + "estimated_eval_tokens": 8818 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These PRs are all model-specific output-tracing refactors, but they affect different architectures and code paths, so they should not be collapsed into one duplicate cluster. The similarity is mostly in the shared migration pattern, not in a single underlying bug or change.", + "confidence": 0.92, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR 44129 is the strongest representative because it has an explicit issue target, a clearly scoped model-specific refactor, and more complete implementation context than the others.", + "best_issue_reason": null, + "best_pr_reason": "PR 44129 is the most complete and well-scoped example of the shared output-capture migration pattern, with an explicit tracking issue and clearer refactor intent.", + "soft_edge_verdicts": [ + { + "left": "pull_request:44024", + "right": "pull_request:44076", + "accept": false, + "reason": "Both are standardized output-tracing refactors, but they touch different models (FocalNet vs ImageGPT) and different forward paths; not the same concrete change." + }, + { + "left": "pull_request:44073", + "right": "pull_request:44074", + "accept": false, + "reason": "Both migrate to capture_outputs/can_return_tuple, but VisualBert and TextNet are separate model implementations and not mergeable as one PR." + }, + { + "left": "pull_request:44056", + "right": "pull_request:44073", + "accept": false, + "reason": "These are similar refactors in different architectures (MPNet vs VisualBert); they share a pattern, not a single underlying bug or code path." 
+ }, + { + "left": "pull_request:44056", + "right": "pull_request:44074", + "accept": false, + "reason": "MPNet and TextNet are unrelated model-specific output-tracing changes; the overlap is only in the refactor style." + }, + { + "left": "pull_request:43997", + "right": "pull_request:44010", + "accept": false, + "reason": "RegNet and SqueezeBert are distinct model code paths, so this is only thematic similarity rather than the same change." + }, + { + "left": "pull_request:44010", + "right": "pull_request:44129", + "accept": false, + "reason": "SqueezeBert and SpeechT5 both adjust output-capture plumbing, but they are separate model-specific implementations and not a single duplicate fix." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 18, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 2, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43998-13", + "nodes": [ + "pull_request:43998", + "pull_request:43999", + "pull_request:44000", + "pull_request:44001", + "pull_request:44002", + "pull_request:44003", + "pull_request:44004", + "pull_request:44025", + "pull_request:44026", + "pull_request:44027", + "pull_request:44028", + "pull_request:44029", + "pull_request:44030" + ], + "soft_pairs": [ + "pull_request:43998|pull_request:44003", + "pull_request:44001|pull_request:44003", + "pull_request:44002|pull_request:44003", + "pull_request:44000|pull_request:44003", + "pull_request:44003|pull_request:44028", + "pull_request:44003|pull_request:44004", + "pull_request:44000|pull_request:44004", + "pull_request:43999|pull_request:44001", + "pull_request:43999|pull_request:44002", + "pull_request:43999|pull_request:44025", + "pull_request:43999|pull_request:44026", 
+ "pull_request:43999|pull_request:44027", + "pull_request:43998|pull_request:44002", + "pull_request:43998|pull_request:44000", + "pull_request:44026|pull_request:44029", + "pull_request:43998|pull_request:44001", + "pull_request:44029|pull_request:44030", + "pull_request:44025|pull_request:44026", + "pull_request:44025|pull_request:44027", + "pull_request:44025|pull_request:44028", + "pull_request:44026|pull_request:44027", + "pull_request:44026|pull_request:44028", + "pull_request:44027|pull_request:44028", + "pull_request:44001|pull_request:44002" + ], + "prepared_review_unit_hash": "78fc6564b492e96fcba193a31efb30f0d95314aa1c81c5982e0280d59cb67575", + "component_budget": { + "node_count": 13, + "item_count": 13, + "soft_pair_count": 43, + "serialized_chars": 41321, + "estimated_input_tokens": 10331, + "estimated_eval_tokens": 20918 + }, + "budget": { + "node_count": 13, + "item_count": 13, + "soft_pair_count": 24, + "serialized_chars": 32109, + "estimated_input_tokens": 8028, + "estimated_eval_tokens": 16312 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These PRs all implement a similar output-tracing refactor pattern, but they target different model architectures and code paths, so they are not duplicates of the same underlying change.", + "confidence": 0.94, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR 44003 is the broadest and most representative refactor in the set, touching two closely related Mamba-family models and the new output-capture plumbing.", + "best_issue_reason": null, + "best_pr_reason": "PR 44003 is the strongest single representative of the cluster because it makes the most substantive tracing changes and spans the closest pair of related models here.", + "soft_edge_verdicts": [ + { + "left": "pull_request:43998", + "right": "pull_request:44003", + "accept": false, + "reason": "Same refactor theme, but timm_backbone 
and mamba are unrelated model paths; not the same change." + }, + { + "left": "pull_request:44001", + "right": "pull_request:44003", + "accept": false, + "reason": "Both are output-tracing refactors, but univnet and mamba are different architectures with different forward paths." + }, + { + "left": "pull_request:44002", + "right": "pull_request:44003", + "accept": false, + "reason": "upernet segmentation code and mamba stateful LM code are unrelated; only the refactor style matches." + }, + { + "left": "pull_request:44000", + "right": "pull_request:44003", + "accept": false, + "reason": "vision_text_dual_encoder and mamba are different subsystems; this is similarity in naming, not the same bug/change." + }, + { + "left": "pull_request:44003", + "right": "pull_request:44028", + "accept": false, + "reason": "mamba/falcon_mamba tracing changes are not the same as superpoint feature extraction changes." + }, + { + "left": "pull_request:44003", + "right": "pull_request:44004", + "accept": false, + "reason": "codegen and mamba modify different forward implementations; no shared concrete code-path issue." + }, + { + "left": "pull_request:44000", + "right": "pull_request:44004", + "accept": false, + "reason": "Both are refactors, but they affect unrelated models and do not look mergeable as one PR." + }, + { + "left": "pull_request:43999", + "right": "pull_request:44001", + "accept": false, + "reason": "mobilenet_v1 and univnet are unrelated model families; same tracing pattern only." + }, + { + "left": "pull_request:43999", + "right": "pull_request:44002", + "accept": false, + "reason": "mobilenet_v1 backbone/classifier code is not the same underlying change as upernet segmentation output handling." + }, + { + "left": "pull_request:43999", + "right": "pull_request:44025", + "accept": false, + "reason": "mobilenet_v1 and depth_anything are separate model implementations; no duplicate code-path problem." 
+ }, + { + "left": "pull_request:43999", + "right": "pull_request:44026", + "accept": false, + "reason": "mobilenet_v1 and vision_encoder_decoder target different model stacks; only the refactor motif overlaps." + }, + { + "left": "pull_request:43999", + "right": "pull_request:44027", + "accept": false, + "reason": "mobilenet_v1 and speech_encoder_decoder are different architectures and not the same fix." + }, + { + "left": "pull_request:43998", + "right": "pull_request:44002", + "accept": false, + "reason": "timm_backbone and upernet are different model components; the shared wording does not imply duplicate work." + }, + { + "left": "pull_request:43998", + "right": "pull_request:44000", + "accept": false, + "reason": "timm_backbone and vision_text_dual_encoder touch unrelated forward logic." + }, + { + "left": "pull_request:44026", + "right": "pull_request:44029", + "accept": false, + "reason": "vision_encoder_decoder wrapper code and rwkv decoder internals are unrelated changes." + }, + { + "left": "pull_request:43998", + "right": "pull_request:44001", + "accept": false, + "reason": "timm_backbone and univnet do not share a concrete bug or code path." + }, + { + "left": "pull_request:44029", + "right": "pull_request:44030", + "accept": false, + "reason": "rwkv and DPR are distinct models; similar output-tracing cleanup is not enough to merge." + }, + { + "left": "pull_request:44025", + "right": "pull_request:44026", + "accept": false, + "reason": "depth_anything and vision_encoder_decoder are different model families; not the same underlying refactor." + }, + { + "left": "pull_request:44025", + "right": "pull_request:44027", + "accept": false, + "reason": "depth_anything and speech_encoder_decoder have different forward contracts and code paths." + }, + { + "left": "pull_request:44025", + "right": "pull_request:44028", + "accept": false, + "reason": "depth_anything and superpoint are unrelated vision models; no duplicate fix." 
+ }, + { + "left": "pull_request:44026", + "right": "pull_request:44027", + "accept": false, + "reason": "vision_encoder_decoder and speech_encoder_decoder are separate wrappers with different internals." + }, + { + "left": "pull_request:44026", + "right": "pull_request:44028", + "accept": false, + "reason": "vision_encoder_decoder and superpoint do not appear to address the same underlying issue." + }, + { + "left": "pull_request:44027", + "right": "pull_request:44028", + "accept": false, + "reason": "speech_encoder_decoder and superpoint are unrelated; same refactor pattern only." + }, + { + "left": "pull_request:44001", + "right": "pull_request:44002", + "accept": false, + "reason": "univnet and upernet are different model families, so these are not duplicates." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 18, + "component_count": 24, + "review_unit_index": 2, + "review_unit_count": 2, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-43998-13", + "nodes": [ + "pull_request:43998", + "pull_request:44000", + "pull_request:44001", + "pull_request:44002", + "pull_request:44004", + "pull_request:44025", + "pull_request:44026", + "pull_request:44027", + "pull_request:44028", + "pull_request:44029", + "pull_request:44030" + ], + "soft_pairs": [ + "pull_request:44000|pull_request:44001", + "pull_request:44000|pull_request:44002", + "pull_request:43998|pull_request:44029", + "pull_request:44002|pull_request:44029", + "pull_request:43998|pull_request:44004", + "pull_request:44001|pull_request:44004", + "pull_request:44002|pull_request:44004", + "pull_request:44025|pull_request:44029", + "pull_request:44025|pull_request:44030", + "pull_request:44026|pull_request:44030", + 
"pull_request:44027|pull_request:44029", + "pull_request:44027|pull_request:44030", + "pull_request:44028|pull_request:44029", + "pull_request:44028|pull_request:44030", + "pull_request:44001|pull_request:44025", + "pull_request:44001|pull_request:44026", + "pull_request:44002|pull_request:44025", + "pull_request:44000|pull_request:44025", + "pull_request:44000|pull_request:44026" + ], + "prepared_review_unit_hash": "92e1d535679e2b51f763118af18a69a2b45127150f618cc787794f02dfa006d7", + "component_budget": { + "node_count": 13, + "item_count": 13, + "soft_pair_count": 43, + "serialized_chars": 41321, + "estimated_input_tokens": 10331, + "estimated_eval_tokens": 20918 + }, + "budget": { + "node_count": 11, + "item_count": 11, + "soft_pair_count": 19, + "serialized_chars": 26642, + "estimated_input_tokens": 6661, + "estimated_eval_tokens": 13578 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": true, + "analyst_result": { + "summary": "These PRs all follow the same broad refactor theme (\u201coutput tracing\u201d), but they target different model implementations and different code paths. They are not duplicates of one another and would not plausibly be merged into a single PR without becoming an unrelated multi-model sweep.", + "confidence": 0.96, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR #44002 is the cleanest, most representative instance of the pattern: a direct `can_return_tuple`/`auto_docstring` refactor in one model, with the usual removal of explicit output-control args. It is a better canonical representative than the more complex multi-file or more specialized PRs.", + "best_issue_reason": null, + "best_pr_reason": "PR #44002 is the best single exemplar of the cluster\u2019s common refactor style, but it is not a duplicate of the other PRs. 
The rest are separate model-specific changes (different files and forward signatures), so there is no global merge candidate here.", + "soft_edge_verdicts": [ + { + "left": "pull_request:44000", + "right": "pull_request:44001", + "accept": false, + "reason": "Same broad refactor label, but different models (`vision_text_dual_encoder` vs `univnet`) and different forward APIs; not the same underlying bug/change." + }, + { + "left": "pull_request:44000", + "right": "pull_request:44002", + "accept": false, + "reason": "Different model subsystems and different signatures; both add output-tracing helpers, but they are independent PRs, not one fix." + }, + { + "left": "pull_request:43998", + "right": "pull_request:44029", + "accept": false, + "reason": "`timm_backbone` refactor vs `rwkv` refactor; unrelated model code paths and different implementation details." + }, + { + "left": "pull_request:44002", + "right": "pull_request:44029", + "accept": false, + "reason": "Both are output-tracing refactors, but for different architectures (`upernet` vs `rwkv`) with distinct forward behavior and changes." + }, + { + "left": "pull_request:43998", + "right": "pull_request:44004", + "accept": false, + "reason": "`timm_backbone` helper cleanup vs `codegen` cache/output-signature changes; not the same concrete change." + }, + { + "left": "pull_request:44001", + "right": "pull_request:44004", + "accept": false, + "reason": "Different models and different code paths; the shared theme is too broad to treat as a duplicate." + }, + { + "left": "pull_request:44002", + "right": "pull_request:44004", + "accept": false, + "reason": "`upernet` segmentation forward refactor and `codegen` generation/cache refactor are distinct changes, not mergeable as one fix." + }, + { + "left": "pull_request:44025", + "right": "pull_request:44029", + "accept": false, + "reason": "`depth_anything`/`prompt_depth_anything` vs `rwkv`; separate model families and separate edits." 
+ }, + { + "left": "pull_request:44025", + "right": "pull_request:44030", + "accept": false, + "reason": "Both are refactors around output handling, but they touch different models and code paths (`depth_anything` vs `dpr`)." + }, + { + "left": "pull_request:44026", + "right": "pull_request:44030", + "accept": false, + "reason": "`vision_encoder_decoder` wrapper changes are not the same underlying change as `DPR` encoder pooling/output handling." + }, + { + "left": "pull_request:44027", + "right": "pull_request:44029", + "accept": false, + "reason": "`speech_encoder_decoder` and `rwkv` are unrelated implementations; the common helper usage is not enough for duplication." + }, + { + "left": "pull_request:44027", + "right": "pull_request:44030", + "accept": false, + "reason": "These affect different model wrappers and different forward signatures; not the same concrete fix." + }, + { + "left": "pull_request:44028", + "right": "pull_request:44029", + "accept": false, + "reason": "`superpoint` and `rwkv` have different forward logic and separate output-capturing rewrites." + }, + { + "left": "pull_request:44028", + "right": "pull_request:44030", + "accept": false, + "reason": "Different model code paths (`superpoint` vs `dpr`), so they are not duplicates." + }, + { + "left": "pull_request:44001", + "right": "pull_request:44025", + "accept": false, + "reason": "Different architectures and different files; the similarity is only the generic output-tracing refactor pattern." + }, + { + "left": "pull_request:44001", + "right": "pull_request:44026", + "accept": false, + "reason": "`vision_text_dual_encoder` and `vision_encoder_decoder` are separate wrapper models; same theme, not same change." + }, + { + "left": "pull_request:44002", + "right": "pull_request:44025", + "accept": false, + "reason": "Separate model families and separate forward modifications; not one underlying bug or change." 
+ }, + { + "left": "pull_request:44000", + "right": "pull_request:44025", + "accept": false, + "reason": "`vision_text_dual_encoder` vs `depth_anything` are unrelated model-specific refactors." + }, + { + "left": "pull_request:44000", + "right": "pull_request:44026", + "accept": false, + "reason": "Different wrapper models and different API adjustments; too broad to merge as duplicate." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 19, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-44662-3", + "nodes": [ + "pull_request:44662", + "pull_request:45075", + "pull_request:45401" + ], + "soft_pairs": [ + "pull_request:44662|pull_request:45401", + "pull_request:44662|pull_request:45075", + "pull_request:45075|pull_request:45401" + ], + "prepared_review_unit_hash": "cf3d9cd398ed5e933efc3b6da8287ed7e77abcfc393ba9007243c357623b1ca0", + "component_budget": { + "node_count": 3, + "item_count": 3, + "soft_pair_count": 3, + "serialized_chars": 11072, + "estimated_input_tokens": 2768, + "estimated_eval_tokens": 5792 + }, + "budget": { + "node_count": 3, + "item_count": 3, + "soft_pair_count": 3, + "serialized_chars": 11072, + "estimated_input_tokens": 2768, + "estimated_eval_tokens": 5792 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "All three pull requests are separate model-addition efforts with only shared boilerplate in Transformers registration/docs. 
They are not duplicates of the same underlying bug or change.", + "confidence": 0.96, + "canonical_issue_reason": null, + "canonical_pr_reason": "No single PR is a clear canonical duplicate target because each implements a different model family (PenguinVL, DeepSeek-OCR-2, Voxtral TTS).", + "best_issue_reason": null, + "best_pr_reason": "PR 44662 is the strongest representative of this cluster: it is the most developed/reviewed implementation and includes the broadest set of files/tests, but it still addresses a distinct model from the others.", + "soft_edge_verdicts": [ + { + "left": "pull_request:44662", + "right": "pull_request:45401", + "accept": false, + "reason": "Different model families and code paths: PenguinVL vs Voxtral TTS. Shared auto-registration/docs files are just framework boilerplate, not evidence of the same change." + }, + { + "left": "pull_request:44662", + "right": "pull_request:45075", + "accept": false, + "reason": "Different model families and implementations: PenguinVL vs DeepSeek-OCR-2. Overlap is limited to common Transformers scaffolding, so these should not be merged as duplicates." + }, + { + "left": "pull_request:45075", + "right": "pull_request:45401", + "accept": false, + "reason": "DeepSeek-OCR-2 and Voxtral TTS are unrelated model additions with separate architectures and targets; shared repo integration files are not enough to treat them as the same PR." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 20, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-44979-2", + "nodes": [ + "pull_request:44979", + "pull_request:45363" + ], + "soft_pairs": [ + "pull_request:44979|pull_request:45363" + ], + "prepared_review_unit_hash": "be59c07ba21b70572a8dfd36153554116d1a2bbd6feea888008cf887b07f0359", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4407, + "estimated_input_tokens": 1102, + "estimated_eval_tokens": 2460 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4407, + "estimated_input_tokens": 1102, + "estimated_eval_tokens": 2460 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "These two PRs are related by the broad theme of fusion, but they implement different things: one introduces a general Module Fusion API scaffold, while the other adds n-to-1 kernel fusion integration via KernelConfig. 
They are not duplicates.", + "confidence": 0.96, + "canonical_issue_reason": null, + "canonical_pr_reason": "No single canonical PR: #44979 is a standalone module-fusion API addition, while #45363 is a separate KernelConfig-based integration path with different files and behavior.", + "best_issue_reason": null, + "best_pr_reason": "#45363 is the more concrete, user-facing fusion feature because it wires fusion into model loading/execution, whereas #44979 is lower-level API scaffolding.", + "soft_edge_verdicts": [ + { + "left": "pull_request:44979", + "right": "pull_request:45363", + "accept": false, + "reason": "Different underlying changes: #44979 adds a new module_fusion API, while #45363 implements n-to-1 kernel fusion through KernelConfig and integration hooks. They do not appear mergeable as one PR." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 21, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-45086-2", + "nodes": [ + "pull_request:45086", + "pull_request:45317" + ], + "soft_pairs": [ + "pull_request:45086|pull_request:45317" + ], + "prepared_review_unit_hash": "2720e1d7ad812495e5ad99c11aaf92079832746a03e0b87b97fd6d136436b422", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4414, + "estimated_input_tokens": 1104, + "estimated_eval_tokens": 2464 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 4414, + "estimated_input_tokens": 1104, + "estimated_eval_tokens": 2464 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + 
"analyst_result": { + "summary": "Both pull requests address the same Mistral regex patch AttributeError by switching from `tokenizer.backend_tokenizer.pre_tokenizer` to `tokenizer.pre_tokenizer`; the later PR also adds test coverage, making it the stronger canonical representative.", + "confidence": 0.98, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR 45317 is the better canonical PR: it fixes the same concrete bug as 45086 but is more complete, with an added test and clearer title.", + "best_issue_reason": null, + "best_pr_reason": "PR 45317 is the best PR candidate because it contains the same code fix plus validation coverage, indicating a more finished and merge-ready change.", + "soft_edge_verdicts": [ + { + "left": "pull_request:45086", + "right": "pull_request:45317", + "accept": true, + "reason": "Same underlying bug and same code-path fix in `src/transformers/tokenization_utils_tokenizers.py`; the second PR is essentially the first fix plus tests, so they are duplicates for triage." 
+ } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 22, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-45332-2", + "nodes": [ + "pull_request:45332", + "pull_request:45333" + ], + "soft_pairs": [ + "pull_request:45332|pull_request:45333" + ], + "prepared_review_unit_hash": "85bc9145ca0d48c162af7d1baec86c3cb92e1b036a01192d8738bed9bb2e772f", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 5385, + "estimated_input_tokens": 1347, + "estimated_eval_tokens": 2950 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 5385, + "estimated_input_tokens": 1347, + "estimated_eval_tokens": 2950 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "Two open PRs in the same heterogeneous-config feature area, but they are not duplicates: one is config-only, the other is a broader modeling/cache/masking implementation that likely builds on it.", + "confidence": 0.84, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR #45332 is the best canonical representative because it covers the end-to-end heterogeneous model support path, including cache, masking, modeling, and tests, rather than only the config layer.", + "best_issue_reason": null, + "best_pr_reason": "PR #45332 is the strongest cluster anchor: it is the broader concrete implementation and most likely the main integration point for the feature.", + "soft_edge_verdicts": [ + { + "left": "pull_request:45332", + "right": "pull_request:45333", + "accept": 
false, + "reason": "They share heterogeneous-config infrastructure, but #45333 is a config-focused PR while #45332 is a broader model/runtime PR. They are related feature work, not the same underlying code change or bug fix, and would not plausibly collapse into one PR without losing separation of concerns." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 23, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-45351-2", + "nodes": [ + "pull_request:45351", + "pull_request:45427" + ], + "soft_pairs": [ + "pull_request:45351|pull_request:45427" + ], + "prepared_review_unit_hash": "d81d80e8bf31d6fdb0098909a80e75e153cb72d5c356c3dfc2d39f19b32665ba", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 3034, + "estimated_input_tokens": 759, + "estimated_eval_tokens": 1774 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 3034, + "estimated_input_tokens": 759, + "estimated_eval_tokens": 1774 + }, + "overflow_policy": "truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "Two PRs target the same bug in `testing_utils.get_device_properties()`\u2014calling CUDA capability APIs when CUDA is unavailable. 
They are effectively duplicate fixes against the same file and same tracking issue.", + "confidence": 0.97, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR #45351 is the cleaner, safer fix: it imports `torch` first and then returns early when `torch.cuda.is_available()` is false, avoiding the capability call entirely.", + "best_issue_reason": null, + "best_pr_reason": "PR #45351 is the best representative because it directly addresses the failure path without introducing a pre-import reference to `torch` in the condition.", + "soft_edge_verdicts": [ + { + "left": "pull_request:45351", + "right": "pull_request:45427", + "accept": true, + "reason": "Both PRs fix the same concrete bug in `src/transformers/testing_utils.py`: guarding CUDA capability access when CUDA is unavailable. The code paths and target issue match closely enough that they could be merged into one PR." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + }, + { + "label": "pull_request", + "component_index": 24, + "component_count": 24, + "review_unit_index": 1, + "review_unit_count": 1, + "status": "reviewed", + "reason": null, + "source": "llm", + "cache_hit": false, + "model": "gpt-5.4-mini?service_tier=flex", + "cluster_id": "cluster-45415-2", + "nodes": [ + "pull_request:45415", + "pull_request:45425" + ], + "soft_pairs": [ + "pull_request:45415|pull_request:45425" + ], + "prepared_review_unit_hash": "d07ac36c4f75972060a8d43d10a04490cf0f0bd8efa701c17699376ac65bb3b6", + "component_budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 5296, + "estimated_input_tokens": 1324, + "estimated_eval_tokens": 2904 + }, + "budget": { + "node_count": 2, + "item_count": 2, + "soft_pair_count": 1, + "serialized_chars": 5296, + "estimated_input_tokens": 1324, + "estimated_eval_tokens": 2904 + }, + "overflow_policy": 
"truncate_then_skip", + "trimmed": false, + "aggressively_trimmed": false, + "split": false, + "analyst_result": { + "summary": "Both pull requests are about typing/type-checking work in Transformers, but they address different scopes: one broadly adds type checking across many `src/transformers/*` files, while the other is a narrower typing/`ty`-configuration change centered on `modeling_utils` and related tooling/docs.", + "confidence": 0.88, + "canonical_issue_reason": null, + "canonical_pr_reason": "PR 45415 is the broader, more representative type-checking change across many Transformers modules and the pyproject typing rules, so it best captures the cluster\u2019s main theme.", + "best_issue_reason": null, + "best_pr_reason": "PR 45415 is the best overall representative because it is the more substantial and central typing cleanup; 45425 is narrower and more tooling/config-oriented.", + "soft_edge_verdicts": [ + { + "left": "pull_request:45415", + "right": "pull_request:45425", + "accept": false, + "reason": "Both are typing-related, but they do not appear to fix the same concrete code-path problem. 45415 is a broad multi-file type-checking pass, while 45425 is a narrower `ty`/`modeling_utils` typing adjustment; they would not naturally merge into one PR." + } + ] + }, + "evaluator_result": null, + "evaluator_used": false, + "retried": false, + "accepted_nontrivial_soft_edge": false, + "error_kind": null, + "error_message": null + } + ] +}