Upload 2025-08-28/runs/4915-17292963207/ci_results_run_models_gpu/model_results.json with huggingface_hub
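
For context, a minimal sketch of how a file like this is pushed with the huggingface_hub client. The repo_id below is hypothetical (the target repository is not shown on this page), and token resolution is assumed to come from HF_TOKEN or a cached login:

    from huggingface_hub import HfApi

    api = HfApi()  # the token is resolved from HF_TOKEN or the cached `huggingface-cli login`
    api.upload_file(
        path_or_fileobj="model_results.json",  # local file to push
        path_in_repo="2025-08-28/runs/4915-17292963207/ci_results_run_models_gpu/model_results.json",
        repo_id="org/ci-results",   # hypothetical repo; the actual target repo is not shown here
        repo_type="dataset",        # assumption: CI reports are typically stored in a dataset repo
    )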
2025-08-28/runs/4915-17292963207/ci_results_run_models_gpu/model_results.json
ADDED
@@ -0,0 +1,440 @@
{
    "models_gemma3": {
        "failed": {
            "PyTorch": {
                "unclassified": 0,
                "single": 2,
                "multi": 2
            },
            "TensorFlow": {
                "unclassified": 0,
                "single": 0,
                "multi": 0
            },
            "Flax": {
                "unclassified": 0,
                "single": 0,
                "multi": 0
            },
            "Tokenizers": {
                "unclassified": 0,
                "single": 0,
                "multi": 0
            },
            "Pipelines": {
                "unclassified": 0,
                "single": 0,
                "multi": 0
            },
            "Trainer": {
                "unclassified": 0,
                "single": 0,
                "multi": 0
            },
            "ONNX": {
                "unclassified": 0,
                "single": 0,
                "multi": 0
            },
            "Auto": {
                "unclassified": 0,
                "single": 0,
                "multi": 0
            },
            "Quantization": {
                "unclassified": 0,
                "single": 0,
                "multi": 0
            },
            "Unclassified": {
                "unclassified": 0,
                "single": 0,
                "multi": 0
            }
        },
        "errors": 0,
        "success": 572,
        "skipped": 250,
        "time_spent": [
            1115.41,
            1115.86
        ],
        "failures": {
            "multi": [
                {
                    "line": "tests/models/gemma3/test_modeling_gemma3.py::Gemma3ModelTest::test_flash_attention_2_padding_matches_padding_free_with_position_ids",
                    "trace": "(line 4271) AssertionError: Tensor-likes are not close!"
                },
                {
                    "line": "tests/models/gemma3/test_modeling_gemma3.py::Gemma3ModelTest::test_flash_attention_2_padding_matches_padding_free_with_position_ids_and_fa_kwargs",
                    "trace": "(line 4271) AssertionError: Tensor-likes are not close!"
                }
            ],
            "single": [
                {
                    "line": "tests/models/gemma3/test_modeling_gemma3.py::Gemma3ModelTest::test_flash_attention_2_padding_matches_padding_free_with_position_ids",
                    "trace": "(line 4271) AssertionError: Tensor-likes are not close!"
                },
                {
                    "line": "tests/models/gemma3/test_modeling_gemma3.py::Gemma3ModelTest::test_flash_attention_2_padding_matches_padding_free_with_position_ids_and_fa_kwargs",
                    "trace": "(line 4271) AssertionError: Tensor-likes are not close!"
                }
            ]
        },
        "job_link": {
            "multi": "https://github.com/huggingface/transformers/actions/runs/17292963207/job/49084660529",
            "single": "https://github.com/huggingface/transformers/actions/runs/17292963207/job/49084660484"
        }
    },
    "models_gemma3n": {
        "failed": {
            "PyTorch": {
                "unclassified": 0,
                "single": 2,
                "multi": 2
            },
            "TensorFlow": {
                "unclassified": 0,
                "single": 0,
                "multi": 0
            },
            "Flax": {
                "unclassified": 0,
                "single": 0,
                "multi": 0
            },
            "Tokenizers": {
                "unclassified": 0,
                "single": 0,
                "multi": 0
            },
            "Pipelines": {
                "unclassified": 0,
                "single": 0,
                "multi": 0
            },
            "Trainer": {
                "unclassified": 0,
                "single": 0,
                "multi": 0
            },
            "ONNX": {
                "unclassified": 0,
                "single": 0,
                "multi": 0
            },
            "Auto": {
                "unclassified": 0,
                "single": 0,
                "multi": 0
            },
            "Quantization": {
                "unclassified": 0,
                "single": 0,
                "multi": 0
            },
            "Unclassified": {
                "unclassified": 0,
                "single": 0,
                "multi": 0
            }
        },
        "errors": 0,
        "success": 301,
        "skipped": 693,
        "time_spent": [
            200.63,
            197.74
        ],
        "failures": {
            "multi": [
                {
                    "line": "tests/models/gemma3n/test_modeling_gemma3n.py::Gemma3nTextModelTest::test_flash_attn_2_fp32_ln",
                    "trace": "(line 1124) RuntimeError: result type Float can't be cast to the desired output type unsigned char"
                },
                {
                    "line": "tests/models/gemma3n/test_modeling_gemma3n.py::Gemma3nTextModelTest::test_flash_attn_2_inference_equivalence_right_padding",
                    "trace": "(line 3589) AssertionError: assert False"
                }
            ],
            "single": [
                {
                    "line": "tests/models/gemma3n/test_modeling_gemma3n.py::Gemma3nTextModelTest::test_flash_attn_2_fp32_ln",
                    "trace": "(line 1124) RuntimeError: result type Float can't be cast to the desired output type unsigned char"
                },
                {
                    "line": "tests/models/gemma3n/test_modeling_gemma3n.py::Gemma3nTextModelTest::test_flash_attn_2_inference_equivalence_right_padding",
                    "trace": "(line 3589) AssertionError: assert False"
                }
            ]
        },
        "job_link": {
            "multi": "https://github.com/huggingface/transformers/actions/runs/17292963207/job/49084660541",
            "single": "https://github.com/huggingface/transformers/actions/runs/17292963207/job/49084660499"
        }
    },
    "models_got_ocr2": {
        "failed": {
            "PyTorch": {
                "unclassified": 0,
                "single": 0,
                "multi": 0
            },
            "TensorFlow": {
                "unclassified": 0,
                "single": 0,
                "multi": 0
            },
            "Flax": {
                "unclassified": 0,
                "single": 0,
                "multi": 0
            },
            "Tokenizers": {
                "unclassified": 0,
                "single": 0,
                "multi": 0
            },
            "Pipelines": {
                "unclassified": 0,
                "single": 0,
                "multi": 0
            },
            "Trainer": {
                "unclassified": 0,
                "single": 0,
                "multi": 0
            },
            "ONNX": {
                "unclassified": 0,
                "single": 0,
                "multi": 0
            },
            "Auto": {
                "unclassified": 0,
                "single": 0,
                "multi": 0
            },
            "Quantization": {
                "unclassified": 0,
                "single": 0,
                "multi": 0
            },
            "Unclassified": {
                "unclassified": 0,
                "single": 0,
                "multi": 0
            }
        },
        "errors": 0,
        "success": 257,
        "skipped": 335,
        "time_spent": [
            130.19,
            129.99
        ],
        "failures": {},
        "job_link": {
            "multi": "https://github.com/huggingface/transformers/actions/runs/17292963207/job/49084660537",
            "single": "https://github.com/huggingface/transformers/actions/runs/17292963207/job/49084660518"
        }
    },
    "models_internvl": {
        "failed": {
            "PyTorch": {
                "unclassified": 0,
                "single": 1,
                "multi": 1
            },
            "TensorFlow": {
                "unclassified": 0,
                "single": 0,
                "multi": 0
            },
            "Flax": {
                "unclassified": 0,
                "single": 0,
                "multi": 0
            },
            "Tokenizers": {
                "unclassified": 0,
                "single": 0,
                "multi": 0
            },
            "Pipelines": {
                "unclassified": 0,
                "single": 0,
                "multi": 0
            },
            "Trainer": {
                "unclassified": 0,
                "single": 0,
                "multi": 0
            },
            "ONNX": {
                "unclassified": 0,
                "single": 0,
                "multi": 0
            },
            "Auto": {
                "unclassified": 0,
                "single": 0,
                "multi": 0
            },
            "Quantization": {
                "unclassified": 0,
                "single": 0,
                "multi": 0
            },
            "Unclassified": {
                "unclassified": 0,
                "single": 0,
                "multi": 0
            }
        },
        "errors": 0,
        "success": 363,
        "skipped": 235,
        "time_spent": [
            280.89,
            279.91
        ],
        "failures": {
            "multi": [
                {
                    "line": "tests/models/internvl/test_modeling_internvl.py::InternVLModelTest::test_flex_attention_with_grads",
                    "trace": "(line 573) torch._inductor.exc.InductorError: RuntimeError: No valid triton configs. OutOfMemoryError: out of resource: triton_tem_fused_0 Required: 106496 Hardware limit:101376 Reducing block sizes or `num_stages` may help."
                }
            ],
            "single": [
                {
                    "line": "tests/models/internvl/test_modeling_internvl.py::InternVLModelTest::test_flex_attention_with_grads",
                    "trace": "(line 573) torch._inductor.exc.InductorError: RuntimeError: No valid triton configs. OutOfMemoryError: out of resource: triton_tem_fused_0 Required: 106496 Hardware limit:101376 Reducing block sizes or `num_stages` may help."
                }
            ]
        },
        "job_link": {
            "multi": "https://github.com/huggingface/transformers/actions/runs/17292963207/job/49084660531",
            "single": "https://github.com/huggingface/transformers/actions/runs/17292963207/job/49084660516"
        }
    },
    "models_qwen2_5_omni": {
        "failed": {
            "PyTorch": {
                "unclassified": 0,
                "single": 3,
                "multi": 5
            },
            "TensorFlow": {
                "unclassified": 0,
                "single": 0,
                "multi": 0
            },
            "Flax": {
                "unclassified": 0,
                "single": 0,
                "multi": 0
            },
            "Tokenizers": {
                "unclassified": 0,
                "single": 0,
                "multi": 0
            },
            "Pipelines": {
                "unclassified": 0,
                "single": 0,
                "multi": 0
            },
            "Trainer": {
                "unclassified": 0,
                "single": 0,
                "multi": 0
            },
            "ONNX": {
                "unclassified": 0,
                "single": 0,
                "multi": 0
            },
            "Auto": {
                "unclassified": 0,
                "single": 0,
                "multi": 0
            },
            "Quantization": {
                "unclassified": 0,
                "single": 0,
                "multi": 0
            },
            "Unclassified": {
                "unclassified": 0,
                "single": 2,
                "multi": 2
            }
        },
        "errors": 0,
        "success": 301,
        "skipped": 129,
        "time_spent": [
            182.94,
            211.94
        ],
        "failures": {
            "multi": [
                {
                    "line": "tests/models/qwen2_5_omni/test_modeling_qwen2_5_omni.py::Qwen2_5OmniThinkerForConditionalGenerationModelTest::test_flash_attn_2_inference_equivalence",
                    "trace": "(line 1052) RuntimeError: split_with_sizes expects split_sizes to sum exactly to 16 (input tensor's size at dimension -1), but got split_sizes=[1, 1, 2, 1, 1, 2]"
                },
                {
                    "line": "tests/models/qwen2_5_omni/test_modeling_qwen2_5_omni.py::Qwen2_5OmniThinkerForConditionalGenerationModelTest::test_flash_attn_2_inference_equivalence_right_padding",
                    "trace": "(line 1052) RuntimeError: split_with_sizes expects split_sizes to sum exactly to 16 (input tensor's size at dimension -1), but got split_sizes=[1, 1, 2, 1, 1, 2]"
                },
                {
                    "line": "tests/models/qwen2_5_omni/test_modeling_qwen2_5_omni.py::Qwen2_5OmniModelIntegrationTest::test_small_model_integration_test_batch_flashatt2",
                    "trace": "(line 675) AssertionError: Lists differ: [\"sys[139 chars] dog is a Labrador Retriever.\", \"system\\nYou a[155 chars]er.\"] != [\"sys[139 chars] dog appears to be a Labrador Retriever.\", \"sy[177 chars]er.\"]"
                },
                {
                    "line": "tests/models/qwen2_5_omni/test_modeling_qwen2_5_omni.py::Qwen2_5OmniModelIntegrationTest::test_small_model_integration_test_multiturn",
                    "trace": "(line 869) torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 20.00 MiB. GPU 1 has a total capacity of 22.18 GiB of which 16.50 MiB is free. Process 60691 has 22.16 GiB memory in use. Of the allocated memory 21.72 GiB is allocated by PyTorch, and 28.05 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)"
                },
                {
                    "line": "tests/models/qwen2_5_omni/test_modeling_qwen2_5_omni.py::Qwen2_5OmniModelIntegrationTest::test_small_model_integration_test_w_audio",
                    "trace": "(line 199) torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 20.00 MiB. GPU 1 has a total capacity of 22.18 GiB of which 18.50 MiB is free. Process 60691 has 22.16 GiB memory in use. Of the allocated memory 21.73 GiB is allocated by PyTorch, and 16.33 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)"
                },
                {
                    "line": "tests/models/qwen2_5_omni/test_processing_qwen2_5_omni.py::Qwen2_5OmniProcessorTest::test_apply_chat_template_audio_2",
                    "trace": "(line 829) RuntimeError: The frame has 0 channels, expected 1. If you are hitting this, it may be because you are using a buggy FFmpeg version. FFmpeg4 is known to fail here in some valid scenarios. Try to upgrade FFmpeg?"
                },
                {
                    "line": "tests/models/qwen2_5_omni/test_processing_qwen2_5_omni.py::Qwen2_5OmniProcessorTest::test_apply_chat_template_audio_3",
                    "trace": "(line 829) RuntimeError: The frame has 0 channels, expected 1. If you are hitting this, it may be because you are using a buggy FFmpeg version. FFmpeg4 is known to fail here in some valid scenarios. Try to upgrade FFmpeg?"
                }
            ],
            "single": [
                {
                    "line": "tests/models/qwen2_5_omni/test_modeling_qwen2_5_omni.py::Qwen2_5OmniThinkerForConditionalGenerationModelTest::test_flash_attn_2_inference_equivalence",
                    "trace": "(line 1052) RuntimeError: split_with_sizes expects split_sizes to sum exactly to 16 (input tensor's size at dimension -1), but got split_sizes=[1, 1, 2, 1, 1, 2]"
                },
                {
                    "line": "tests/models/qwen2_5_omni/test_modeling_qwen2_5_omni.py::Qwen2_5OmniThinkerForConditionalGenerationModelTest::test_flash_attn_2_inference_equivalence_right_padding",
                    "trace": "(line 1052) RuntimeError: split_with_sizes expects split_sizes to sum exactly to 16 (input tensor's size at dimension -1), but got split_sizes=[1, 1, 2, 1, 1, 2]"
                },
                {
                    "line": "tests/models/qwen2_5_omni/test_modeling_qwen2_5_omni.py::Qwen2_5OmniModelIntegrationTest::test_small_model_integration_test_batch_flashatt2",
                    "trace": "(line 675) AssertionError: Lists differ: [\"sys[139 chars] dog is a Labrador Retriever.\", \"system\\nYou a[155 chars]er.\"] != [\"sys[139 chars] dog appears to be a Labrador Retriever.\", \"sy[177 chars]er.\"]"
                },
                {
                    "line": "tests/models/qwen2_5_omni/test_processing_qwen2_5_omni.py::Qwen2_5OmniProcessorTest::test_apply_chat_template_audio_2",
                    "trace": "(line 829) RuntimeError: The frame has 0 channels, expected 1. If you are hitting this, it may be because you are using a buggy FFmpeg version. FFmpeg4 is known to fail here in some valid scenarios. Try to upgrade FFmpeg?"
                },
                {
                    "line": "tests/models/qwen2_5_omni/test_processing_qwen2_5_omni.py::Qwen2_5OmniProcessorTest::test_apply_chat_template_audio_3",
                    "trace": "(line 829) RuntimeError: The frame has 0 channels, expected 1. If you are hitting this, it may be because you are using a buggy FFmpeg version. FFmpeg4 is known to fail here in some valid scenarios. Try to upgrade FFmpeg?"
                }
            ]
        },
        "job_link": {
            "multi": "https://github.com/huggingface/transformers/actions/runs/17292963207/job/49084660538",
            "single": "https://github.com/huggingface/transformers/actions/runs/17292963207/job/49084660525"
        }
    }
}
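
The schema above is regular: each top-level key is a model suite, "failed" maps framework categories to single-GPU/multi-GPU failure counts, and "failures" maps "single"/"multi" to {"line", "trace"} entries. A small sketch (not part of the CI tooling itself) of how a local copy of this report can be summarized:

    import json

    # Load a local copy of the report above (the filename is an assumption).
    with open("model_results.json") as f:
        results = json.load(f)

    for suite, data in results.items():
        # "failed" maps framework -> {"unclassified", "single", "multi"} counts.
        single_failed = sum(counts["single"] for counts in data["failed"].values())
        multi_failed = sum(counts["multi"] for counts in data["failed"].values())
        print(
            f"{suite}: success={data['success']}, skipped={data['skipped']}, "
            f"failed(single)={single_failed}, failed(multi)={multi_failed}"
        )
        # "failures" maps "single"/"multi" to a list of {"line", "trace"} entries.
        for device_setup, failures in data.get("failures", {}).items():
            for failure in failures:
                print(f"  [{device_setup}] {failure['line']}")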