hf-transformers-bot committed on
Commit bc24074 · verified · 1 Parent(s): 86de67c

Upload 2025-10-28/ci_results_run_models_gpu/new_failures.json with huggingface_hub

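The report is uploaded programmatically by the CI bot, per the commit message, "with huggingface_hub". A minimal sketch of how such an upload can be done with HfApi.upload_file; the repo_id below is a hypothetical placeholder, not the actual target repository:

from huggingface_hub import HfApi

api = HfApi()  # picks up the token from the environment / local login
api.upload_file(
    path_or_fileobj="new_failures.json",                                    # local report produced by the CI run
    path_in_repo="2025-10-28/ci_results_run_models_gpu/new_failures.json",  # path inside the repo, as in this commit
    repo_id="your-org/ci-reports",                                          # hypothetical placeholder repo
    repo_type="dataset",
    commit_message="Upload 2025-10-28/ci_results_run_models_gpu/new_failures.json with huggingface_hub",
)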
2025-10-28/ci_results_run_models_gpu/new_failures.json ADDED
@@ -0,0 +1,704 @@
+ {
+ "bamba": {
+ "single-gpu": [
+ "tests/models/bamba/test_modeling_bamba.py::BambaModelTest::test_flash_attn_2_inference_equivalence",
+ "tests/models/bamba/test_modeling_bamba.py::BambaModelTest::test_flash_attn_2_inference_equivalence_right_padding"
+ ],
+ "multi-gpu": [
+ "tests/models/bamba/test_modeling_bamba.py::BambaModelTest::test_flash_attn_2_inference_equivalence",
+ "tests/models/bamba/test_modeling_bamba.py::BambaModelTest::test_flash_attn_2_inference_equivalence_right_padding"
+ ]
+ },
+ "bark": {
+ "single-gpu": [
+ "tests/models/bark/test_modeling_bark.py::BarkSemanticModelTest::test_eager_matches_fa2_generate",
+ "tests/models/bark/test_modeling_bark.py::BarkSemanticModelTest::test_flash_attention_2_continue_generate_with_position_ids",
+ "tests/models/bark/test_modeling_bark.py::BarkCoarseModelTest::test_eager_matches_fa2_generate",
+ "tests/models/bark/test_modeling_bark.py::BarkCoarseModelTest::test_flash_attention_2_continue_generate_with_position_ids"
+ ],
+ "multi-gpu": [
+ "tests/models/bark/test_modeling_bark.py::BarkSemanticModelTest::test_eager_matches_fa2_generate",
+ "tests/models/bark/test_modeling_bark.py::BarkSemanticModelTest::test_flash_attention_2_continue_generate_with_position_ids",
+ "tests/models/bark/test_modeling_bark.py::BarkCoarseModelTest::test_eager_matches_fa2_generate",
+ "tests/models/bark/test_modeling_bark.py::BarkCoarseModelTest::test_flash_attention_2_continue_generate_with_position_ids"
+ ]
+ },
+ "blt": {
+ "single-gpu": [
+ "tests/models/blt/test_modeling_blt.py::BltModelTest::test_flash_attn_2_can_compile_with_attention_mask_None_without_graph_break"
+ ],
+ "multi-gpu": [
+ "tests/models/blt/test_modeling_blt.py::BltModelTest::test_flash_attn_2_can_compile_with_attention_mask_None_without_graph_break"
+ ]
+ },
+ "chameleon": {
+ "single-gpu": [
+ "tests/models/chameleon/test_modeling_chameleon.py::ChameleonModelTest::test_flash_attention_2_padding_matches_padding_free_with_position_ids"
+ ],
+ "multi-gpu": [
+ "tests/models/chameleon/test_modeling_chameleon.py::ChameleonModelTest::test_flash_attention_2_padding_matches_padding_free_with_position_ids",
+ "tests/models/chameleon/test_modeling_chameleon.py::ChameleonVision2SeqModelTest::test_flash_attention_2_padding_matches_padding_free_with_position_ids_and_fa_kwargs"
+ ]
+ },
+ "clap": {
+ "single-gpu": [
+ "tests/models/clap/test_modeling_clap.py::ClapAudioModelTest::test_flash_attn_2_inference_equivalence",
+ "tests/models/clap/test_modeling_clap.py::ClapAudioModelTest::test_flash_attn_2_inference_equivalence_right_padding",
+ "tests/models/clap/test_modeling_clap.py::ClapModelTest::test_flash_attn_2_inference_equivalence",
+ "tests/models/clap/test_modeling_clap.py::ClapModelTest::test_flash_attn_2_inference_equivalence_right_padding"
+ ],
+ "multi-gpu": [
+ "tests/models/clap/test_modeling_clap.py::ClapAudioModelTest::test_flash_attn_2_inference_equivalence",
+ "tests/models/clap/test_modeling_clap.py::ClapAudioModelTest::test_flash_attn_2_inference_equivalence_right_padding",
+ "tests/models/clap/test_modeling_clap.py::ClapModelTest::test_flash_attn_2_inference_equivalence",
+ "tests/models/clap/test_modeling_clap.py::ClapModelTest::test_flash_attn_2_inference_equivalence_right_padding"
+ ]
+ },
+ "colpali": {
+ "single-gpu": [
+ "tests/models/colpali/test_modeling_colpali.py::ColPaliForRetrievalModelTest::test_flash_attn_2_inference_equivalence",
+ "tests/models/colpali/test_modeling_colpali.py::ColPaliForRetrievalModelTest::test_flash_attn_2_inference_equivalence_right_padding"
+ ],
+ "multi-gpu": [
+ "tests/models/colpali/test_modeling_colpali.py::ColPaliForRetrievalModelTest::test_flash_attn_2_inference_equivalence",
+ "tests/models/colpali/test_modeling_colpali.py::ColPaliForRetrievalModelTest::test_flash_attn_2_inference_equivalence_right_padding"
+ ]
+ },
+ "data2vec": {
+ "single-gpu": [
+ "tests/models/data2vec/test_modeling_data2vec_text.py::Data2VecTextModelTest::test_flash_attn_2_inference_equivalence"
+ ],
+ "multi-gpu": []
+ },
+ "deepseek_v2": {
+ "single-gpu": [
+ "tests/models/deepseek_v2/test_modeling_deepseek_v2.py::DeepseekV2ModelTest::test_flash_attention_2_padding_matches_padding_free_with_position_ids",
+ "tests/models/deepseek_v2/test_modeling_deepseek_v2.py::DeepseekV2ModelTest::test_flash_attn_2_can_compile_with_attention_mask_None_without_graph_break",
+ "tests/models/deepseek_v2/test_modeling_deepseek_v2.py::DeepseekV2ModelTest::test_flash_attn_2_fp32_ln"
+ ],
+ "multi-gpu": [
+ "tests/models/deepseek_v2/test_modeling_deepseek_v2.py::DeepseekV2ModelTest::test_flash_attention_2_padding_matches_padding_free_with_position_ids",
+ "tests/models/deepseek_v2/test_modeling_deepseek_v2.py::DeepseekV2ModelTest::test_flash_attn_2_can_compile_with_attention_mask_None_without_graph_break",
+ "tests/models/deepseek_v2/test_modeling_deepseek_v2.py::DeepseekV2ModelTest::test_flash_attn_2_fp32_ln"
+ ]
+ },
+ "deepseek_vl_hybrid": {
+ "single-gpu": [
+ "tests/models/deepseek_vl_hybrid/test_modeling_deepseek_vl_hybrid.py::DeepseekVLHybridModelTest::test_flash_attention_2_continue_generate_with_position_ids",
+ "tests/models/deepseek_vl_hybrid/test_modeling_deepseek_vl_hybrid.py::DeepseekVLHybridModelTest::test_flash_attn_2_fp32_ln",
+ "tests/models/deepseek_vl_hybrid/test_modeling_deepseek_vl_hybrid.py::DeepseekVLHybridModelTest::test_flash_attn_2_from_config"
+ ],
+ "multi-gpu": [
+ "tests/models/deepseek_vl_hybrid/test_modeling_deepseek_vl_hybrid.py::DeepseekVLHybridModelTest::test_flash_attention_2_continue_generate_with_position_ids",
+ "tests/models/deepseek_vl_hybrid/test_modeling_deepseek_vl_hybrid.py::DeepseekVLHybridModelTest::test_flash_attn_2_fp32_ln",
+ "tests/models/deepseek_vl_hybrid/test_modeling_deepseek_vl_hybrid.py::DeepseekVLHybridModelTest::test_flash_attn_2_from_config"
+ ]
+ },
+ "diffllama": {
+ "single-gpu": [
+ "tests/models/diffllama/test_modeling_diffllama.py::DiffLlamaModelTest::test_flash_attn_2_generate_padding_right",
+ "tests/models/diffllama/test_modeling_diffllama.py::DiffLlamaModelTest::test_flash_attn_2_inference_equivalence",
+ "tests/models/diffllama/test_modeling_diffllama.py::DiffLlamaModelTest::test_flash_attn_2_inference_equivalence_right_padding"
+ ],
+ "multi-gpu": [
+ "tests/models/diffllama/test_modeling_diffllama.py::DiffLlamaModelTest::test_flash_attn_2_generate_padding_right",
+ "tests/models/diffllama/test_modeling_diffllama.py::DiffLlamaModelTest::test_flash_attn_2_inference_equivalence",
+ "tests/models/diffllama/test_modeling_diffllama.py::DiffLlamaModelTest::test_flash_attn_2_inference_equivalence_right_padding"
+ ]
+ },
+ "donut": {
+ "single-gpu": [
+ "tests/models/donut/test_modeling_donut_swin.py::DonutSwinModelTest::test_flash_attn_2_inference_equivalence",
+ "tests/models/donut/test_modeling_donut_swin.py::DonutSwinModelTest::test_flash_attn_2_inference_equivalence_right_padding"
+ ],
+ "multi-gpu": [
+ "tests/models/donut/test_modeling_donut_swin.py::DonutSwinModelTest::test_flash_attn_2_inference_equivalence",
+ "tests/models/donut/test_modeling_donut_swin.py::DonutSwinModelTest::test_flash_attn_2_inference_equivalence_right_padding"
+ ]
+ },
+ "efficientloftr": {
+ "single-gpu": [
+ "tests/models/efficientloftr/test_modeling_efficientloftr.py::EfficientLoFTRModelTest::test_flash_attn_2_inference_equivalence",
+ "tests/models/efficientloftr/test_modeling_efficientloftr.py::EfficientLoFTRModelTest::test_flash_attn_2_inference_equivalence_right_padding"
+ ],
+ "multi-gpu": [
+ "tests/models/efficientloftr/test_modeling_efficientloftr.py::EfficientLoFTRModelTest::test_flash_attn_2_inference_equivalence",
+ "tests/models/efficientloftr/test_modeling_efficientloftr.py::EfficientLoFTRModelTest::test_flash_attn_2_inference_equivalence_right_padding"
+ ]
+ },
+ "emu3": {
+ "single-gpu": [
+ "tests/models/emu3/test_modeling_emu3.py::Emu3Vision2TextModelTest::test_flash_attn_2_inference_equivalence",
+ "tests/models/emu3/test_modeling_emu3.py::Emu3Vision2TextModelTest::test_flash_attn_2_inference_equivalence_right_padding"
+ ],
+ "multi-gpu": [
+ "tests/models/emu3/test_modeling_emu3.py::Emu3Vision2TextModelTest::test_flash_attn_2_inference_equivalence",
+ "tests/models/emu3/test_modeling_emu3.py::Emu3Vision2TextModelTest::test_flash_attn_2_inference_equivalence_right_padding"
+ ]
+ },
+ "ernie4_5_moe": {
+ "single-gpu": [],
+ "multi-gpu": [
+ "tests/models/ernie4_5_moe/test_modeling_ernie4_5_moe.py::Ernie4_5_MoeModelTest::test_flash_attn_2_equivalence"
+ ]
+ },
+ "exaone4": {
+ "single-gpu": [
+ "tests/models/exaone4/test_modeling_exaone4.py::Exaone4ModelTest::test_flash_attention_2_padding_matches_padding_free_with_position_ids"
+ ],
+ "multi-gpu": [
+ "tests/models/exaone4/test_modeling_exaone4.py::Exaone4ModelTest::test_flash_attention_2_padding_matches_padding_free_with_position_ids",
+ "tests/models/exaone4/test_modeling_exaone4.py::Exaone4IntegrationTest::test_model_generation_long_flash"
+ ]
+ },
+ "falcon": {
+ "single-gpu": [
+ "tests/models/falcon/test_modeling_falcon.py::FalconModelTest::test_flash_attn_2_inference_equivalence",
+ "tests/models/falcon/test_modeling_falcon.py::FalconModelTest::test_flash_attn_2_inference_equivalence_right_padding"
+ ],
+ "multi-gpu": [
+ "tests/models/falcon/test_modeling_falcon.py::FalconModelTest::test_flash_attn_2_inference_equivalence",
+ "tests/models/falcon/test_modeling_falcon.py::FalconModelTest::test_flash_attn_2_inference_equivalence_right_padding"
+ ]
+ },
+ "flex_olmo": {
+ "single-gpu": [
+ "tests/models/flex_olmo/test_modeling_flex_olmo.py::FlexOlmoModelTest::test_flash_attention_2_padding_matches_padding_free_with_position_ids",
+ "tests/models/flex_olmo/test_modeling_flex_olmo.py::FlexOlmoModelTest::test_flash_attention_2_padding_matches_padding_free_with_position_ids_and_fa_kwargs",
+ "tests/models/flex_olmo/test_modeling_flex_olmo.py::FlexOlmoModelTest::test_flash_attn_2_can_compile_with_attention_mask_None_without_graph_break"
+ ],
+ "multi-gpu": [
+ "tests/models/flex_olmo/test_modeling_flex_olmo.py::FlexOlmoModelTest::test_flash_attention_2_padding_matches_padding_free_with_position_ids",
+ "tests/models/flex_olmo/test_modeling_flex_olmo.py::FlexOlmoModelTest::test_flash_attention_2_padding_matches_padding_free_with_position_ids_and_fa_kwargs",
+ "tests/models/flex_olmo/test_modeling_flex_olmo.py::FlexOlmoModelTest::test_flash_attn_2_can_compile_with_attention_mask_None_without_graph_break"
+ ]
+ },
+ "gemma3n": {
+ "single-gpu": [
+ "tests/models/gemma3n/test_modeling_gemma3n.py::Gemma3nTextModelTest::test_flash_attn_2_equivalence",
+ "tests/models/gemma3n/test_modeling_gemma3n.py::Gemma3nTextModelTest::test_flash_attn_2_inference_equivalence",
+ "tests/models/gemma3n/test_modeling_gemma3n.py::Gemma3nTextModelTest::test_flash_attn_2_inference_equivalence_right_padding"
+ ],
+ "multi-gpu": [
+ "tests/models/gemma3n/test_modeling_gemma3n.py::Gemma3nTextModelTest::test_flash_attn_2_equivalence",
+ "tests/models/gemma3n/test_modeling_gemma3n.py::Gemma3nTextModelTest::test_flash_attn_2_inference_equivalence",
+ "tests/models/gemma3n/test_modeling_gemma3n.py::Gemma3nTextModelTest::test_flash_attn_2_inference_equivalence_right_padding"
+ ]
+ },
+ "glm4": {
+ "single-gpu": [
+ "tests/models/glm4/test_modeling_glm4.py::Glm4ModelTest::test_flash_attn_2_equivalence"
+ ],
+ "multi-gpu": []
+ },
+ "glm4_moe": {
+ "single-gpu": [
+ "tests/models/glm4_moe/test_modeling_glm4_moe.py::Glm4MoeModelTest::test_flash_attn_2_can_compile_with_attention_mask_None_without_graph_break"
+ ],
+ "multi-gpu": [
+ "tests/models/glm4_moe/test_modeling_glm4_moe.py::Glm4MoeModelTest::test_flash_attn_2_can_compile_with_attention_mask_None_without_graph_break"
+ ]
+ },
+ "gpt2": {
+ "single-gpu": [
+ "tests/models/gpt2/test_modeling_gpt2.py::GPT2ModelLanguageGenerationTest::test_flash_attn_2_generate_padding_left"
+ ],
+ "multi-gpu": [
+ "tests/models/gpt2/test_modeling_gpt2.py::GPT2ModelLanguageGenerationTest::test_flash_attn_2_generate_padding_left"
+ ]
+ },
+ "gpt_oss": {
+ "single-gpu": [
+ "tests/models/gpt_oss/test_modeling_gpt_oss.py::GptOssModelTest::test_flash_attn_2_inference_equivalence",
+ "tests/models/gpt_oss/test_modeling_gpt_oss.py::GptOssModelTest::test_flash_attn_2_inference_equivalence_right_padding"
+ ],
+ "multi-gpu": [
+ "tests/models/gpt_oss/test_modeling_gpt_oss.py::GptOssModelTest::test_flash_attn_2_inference_equivalence",
+ "tests/models/gpt_oss/test_modeling_gpt_oss.py::GptOssModelTest::test_flash_attn_2_inference_equivalence_right_padding",
+ "tests/models/gpt_oss/test_modeling_gpt_oss.py::GptOssModelTest::test_flash_attention_2_padding_matches_padding_free_with_position_ids_and_fa_kwargs"
+ ]
+ },
+ "granitemoe": {
+ "single-gpu": [
+ "tests/models/granitemoe/test_modeling_granitemoe.py::GraniteMoeModelTest::test_flash_attention_2_padding_matches_padding_free_with_position_ids_and_fa_kwargs"
+ ],
+ "multi-gpu": [
+ "tests/models/granitemoe/test_modeling_granitemoe.py::GraniteMoeModelTest::test_flash_attention_2_padding_matches_padding_free_with_position_ids_and_fa_kwargs"
+ ]
+ },
+ "granitemoehybrid": {
+ "single-gpu": [
+ "tests/models/granitemoehybrid/test_modeling_granitemoehybrid.py::BambaModelTest::test_flash_attn_2_inference_equivalence",
+ "tests/models/granitemoehybrid/test_modeling_granitemoehybrid.py::BambaModelTest::test_flash_attn_2_inference_equivalence_right_padding",
+ "tests/models/granitemoehybrid/test_modeling_granitemoehybrid.py::GraniteMoeHybridModelTest::test_flash_attn_2_inference_equivalence",
+ "tests/models/granitemoehybrid/test_modeling_granitemoehybrid.py::GraniteMoeHybridModelTest::test_flash_attn_2_inference_equivalence_right_padding"
+ ],
+ "multi-gpu": [
+ "tests/models/granitemoehybrid/test_modeling_granitemoehybrid.py::BambaModelTest::test_flash_attn_2_inference_equivalence",
+ "tests/models/granitemoehybrid/test_modeling_granitemoehybrid.py::BambaModelTest::test_flash_attn_2_inference_equivalence_right_padding",
+ "tests/models/granitemoehybrid/test_modeling_granitemoehybrid.py::GraniteMoeHybridModelTest::test_flash_attn_2_inference_equivalence",
+ "tests/models/granitemoehybrid/test_modeling_granitemoehybrid.py::GraniteMoeHybridModelTest::test_flash_attn_2_inference_equivalence_right_padding"
+ ]
+ },
+ "granitemoeshared": {
+ "single-gpu": [
+ "tests/models/granitemoeshared/test_modeling_granitemoeshared.py::GraniteMoeSharedModelTest::test_flash_attention_2_padding_matches_padding_free_with_position_ids_and_fa_kwargs"
+ ],
+ "multi-gpu": [
+ "tests/models/granitemoeshared/test_modeling_granitemoeshared.py::GraniteMoeSharedModelTest::test_flash_attention_2_padding_matches_padding_free_with_position_ids_and_fa_kwargs"
+ ]
+ },
+ "grounding_dino": {
+ "single-gpu": [
+ "tests/models/grounding_dino/test_modeling_grounding_dino.py::GroundingDinoModelTest::test_flash_attn_2_inference_equivalence",
+ "tests/models/grounding_dino/test_modeling_grounding_dino.py::GroundingDinoModelTest::test_flash_attn_2_inference_equivalence_right_padding"
+ ],
+ "multi-gpu": [
+ "tests/models/grounding_dino/test_modeling_grounding_dino.py::GroundingDinoModelTest::test_flash_attn_2_inference_equivalence",
+ "tests/models/grounding_dino/test_modeling_grounding_dino.py::GroundingDinoModelTest::test_flash_attn_2_inference_equivalence_right_padding"
+ ]
+ },
+ "instructblip": {
+ "single-gpu": [
+ "tests/models/instructblip/test_modeling_instructblip.py::InstructBlipForConditionalGenerationDecoderOnlyTest::test_flash_attn_2_fp32_ln",
+ "tests/models/instructblip/test_modeling_instructblip.py::InstructBlipForConditionalGenerationDecoderOnlyTest::test_flash_attn_2_from_config"
+ ],
+ "multi-gpu": [
+ "tests/models/instructblip/test_modeling_instructblip.py::InstructBlipForConditionalGenerationDecoderOnlyTest::test_flash_attn_2_fp32_ln",
+ "tests/models/instructblip/test_modeling_instructblip.py::InstructBlipForConditionalGenerationDecoderOnlyTest::test_flash_attn_2_from_config"
+ ]
+ },
+ "instructblipvideo": {
+ "single-gpu": [
+ "tests/models/instructblipvideo/test_modeling_instructblipvideo.py::InstructBlipVideoForConditionalGenerationDecoderOnlyTest::test_flash_attn_2_fp32_ln",
+ "tests/models/instructblipvideo/test_modeling_instructblipvideo.py::InstructBlipVideoForConditionalGenerationDecoderOnlyTest::test_flash_attn_2_from_config"
+ ],
+ "multi-gpu": [
+ "tests/models/instructblipvideo/test_modeling_instructblipvideo.py::InstructBlipVideoForConditionalGenerationDecoderOnlyTest::test_flash_attn_2_fp32_ln",
+ "tests/models/instructblipvideo/test_modeling_instructblipvideo.py::InstructBlipVideoForConditionalGenerationDecoderOnlyTest::test_flash_attn_2_from_config"
+ ]
+ },
+ "janus": {
+ "single-gpu": [
+ "tests/models/janus/test_modeling_janus.py::JanusVisionText2TextModelTest::test_flash_attn_2_inference_equivalence",
+ "tests/models/janus/test_modeling_janus.py::JanusVisionText2TextModelTest::test_flash_attn_2_inference_equivalence_right_padding"
+ ],
+ "multi-gpu": [
+ "tests/models/janus/test_modeling_janus.py::JanusVisionText2TextModelTest::test_flash_attn_2_inference_equivalence",
+ "tests/models/janus/test_modeling_janus.py::JanusVisionText2TextModelTest::test_flash_attn_2_inference_equivalence_right_padding"
+ ]
+ },
+ "jetmoe": {
+ "single-gpu": [
+ "tests/models/jetmoe/test_modeling_jetmoe.py::JetMoeModelTest::test_flash_attn_2_equivalence",
+ "tests/models/jetmoe/test_modeling_jetmoe.py::JetMoeModelTest::test_flash_attn_2_fp32_ln"
+ ],
+ "multi-gpu": [
+ "tests/models/jetmoe/test_modeling_jetmoe.py::JetMoeModelTest::test_flash_attn_2_fp32_ln"
+ ]
+ },
+ "kosmos2": {
+ "single-gpu": [
+ "tests/models/kosmos2/test_modeling_kosmos2.py::Kosmos2ModelTest::test_eager_matches_fa2_generate",
+ "tests/models/kosmos2/test_modeling_kosmos2.py::Kosmos2ModelTest::test_flash_attention_2_continue_generate_with_position_ids",
+ "tests/models/kosmos2/test_modeling_kosmos2.py::Kosmos2ModelTest::test_flash_attn_2_can_dispatch_composite_models",
+ "tests/models/kosmos2/test_modeling_kosmos2.py::Kosmos2ModelTest::test_flash_attn_2_fp32_ln",
+ "tests/models/kosmos2/test_modeling_kosmos2.py::Kosmos2ModelTest::test_flash_attn_2_from_config",
+ "tests/models/kosmos2/test_modeling_kosmos2.py::Kosmos2ModelTest::test_flash_attn_2_inference_equivalence",
+ "tests/models/kosmos2/test_modeling_kosmos2.py::Kosmos2ModelTest::test_flash_attn_2_inference_equivalence_right_padding"
+ ],
+ "multi-gpu": [
+ "tests/models/kosmos2/test_modeling_kosmos2.py::Kosmos2ModelTest::test_eager_matches_fa2_generate",
+ "tests/models/kosmos2/test_modeling_kosmos2.py::Kosmos2ModelTest::test_flash_attention_2_continue_generate_with_position_ids",
+ "tests/models/kosmos2/test_modeling_kosmos2.py::Kosmos2ModelTest::test_flash_attn_2_can_dispatch_composite_models",
+ "tests/models/kosmos2/test_modeling_kosmos2.py::Kosmos2ModelTest::test_flash_attn_2_fp32_ln",
+ "tests/models/kosmos2/test_modeling_kosmos2.py::Kosmos2ModelTest::test_flash_attn_2_from_config",
+ "tests/models/kosmos2/test_modeling_kosmos2.py::Kosmos2ModelTest::test_flash_attn_2_inference_equivalence",
+ "tests/models/kosmos2/test_modeling_kosmos2.py::Kosmos2ModelTest::test_flash_attn_2_inference_equivalence_right_padding"
+ ]
+ },
+ "kosmos2_5": {
+ "single-gpu": [
+ "tests/models/kosmos2_5/test_modeling_kosmos2_5.py::Kosmos2_5ModelTest::test_flash_attn_2_can_dispatch_composite_models"
+ ],
+ "multi-gpu": [
+ "tests/models/kosmos2_5/test_modeling_kosmos2_5.py::Kosmos2_5ModelTest::test_flash_attn_2_can_dispatch_composite_models"
+ ]
+ },
+ "kyutai_speech_to_text": {
+ "single-gpu": [
+ "tests/models/kyutai_speech_to_text/test_modeling_kyutai_speech_to_text.py::KyutaiSpeechToTextModelTest::test_eager_matches_fa2_generate",
+ "tests/models/kyutai_speech_to_text/test_modeling_kyutai_speech_to_text.py::KyutaiSpeechToTextModelTest::test_flash_attn_2_inference_equivalence",
+ "tests/models/kyutai_speech_to_text/test_modeling_kyutai_speech_to_text.py::KyutaiSpeechToTextModelTest::test_flash_attn_2_inference_equivalence_right_padding"
+ ],
+ "multi-gpu": [
+ "tests/models/kyutai_speech_to_text/test_modeling_kyutai_speech_to_text.py::KyutaiSpeechToTextModelTest::test_eager_matches_fa2_generate",
+ "tests/models/kyutai_speech_to_text/test_modeling_kyutai_speech_to_text.py::KyutaiSpeechToTextModelTest::test_flash_attn_2_inference_equivalence",
+ "tests/models/kyutai_speech_to_text/test_modeling_kyutai_speech_to_text.py::KyutaiSpeechToTextModelTest::test_flash_attn_2_inference_equivalence_right_padding"
+ ]
+ },
+ "lfm2": {
+ "single-gpu": [
+ "tests/models/lfm2/test_modeling_lfm2.py::Lfm2ModelTest::test_flash_attention_2_padding_matches_padding_free_with_position_ids",
+ "tests/models/lfm2/test_modeling_lfm2.py::Lfm2ModelTest::test_flash_attn_2_can_compile_with_attention_mask_None_without_graph_break"
+ ],
+ "multi-gpu": [
+ "tests/models/lfm2/test_modeling_lfm2.py::Lfm2ModelTest::test_flash_attention_2_padding_matches_padding_free_with_position_ids",
+ "tests/models/lfm2/test_modeling_lfm2.py::Lfm2ModelTest::test_flash_attn_2_can_compile_with_attention_mask_None_without_graph_break"
+ ]
+ },
+ "lfm2_moe": {
+ "single-gpu": [
+ "tests/models/lfm2_moe/test_modeling_lfm2_moe.py::Lfm2MoeModelTest::test_flash_attention_2_padding_matches_padding_free_with_position_ids",
+ "tests/models/lfm2_moe/test_modeling_lfm2_moe.py::Lfm2MoeModelTest::test_flash_attn_2_can_compile_with_attention_mask_None_without_graph_break"
+ ],
+ "multi-gpu": [
+ "tests/models/lfm2_moe/test_modeling_lfm2_moe.py::Lfm2MoeModelTest::test_flash_attention_2_padding_matches_padding_free_with_position_ids",
+ "tests/models/lfm2_moe/test_modeling_lfm2_moe.py::Lfm2MoeModelTest::test_flash_attn_2_can_compile_with_attention_mask_None_without_graph_break"
+ ]
+ },
+ "lfm2_vl": {
+ "single-gpu": [
+ "tests/models/lfm2_vl/test_modeling_lfm2_vl.py::Lfm2VlModelTest::test_flash_attn_2_inference_equivalence",
+ "tests/models/lfm2_vl/test_modeling_lfm2_vl.py::Lfm2VlModelTest::test_flash_attn_2_inference_equivalence_right_padding"
+ ],
+ "multi-gpu": [
+ "tests/models/lfm2_vl/test_modeling_lfm2_vl.py::Lfm2VlModelTest::test_flash_attn_2_inference_equivalence",
+ "tests/models/lfm2_vl/test_modeling_lfm2_vl.py::Lfm2VlModelTest::test_flash_attn_2_inference_equivalence_right_padding"
+ ]
+ },
+ "llava_next": {
+ "single-gpu": [
+ "tests/models/llava_next/test_modeling_llava_next.py::LlavaNextForConditionalGenerationModelTest::test_flash_attn_2_inference_equivalence",
+ "tests/models/llava_next/test_modeling_llava_next.py::LlavaNextForConditionalGenerationModelTest::test_flash_attn_2_inference_equivalence_right_padding"
+ ],
+ "multi-gpu": [
+ "tests/models/llava_next/test_modeling_llava_next.py::LlavaNextForConditionalGenerationModelTest::test_flash_attn_2_inference_equivalence",
+ "tests/models/llava_next/test_modeling_llava_next.py::LlavaNextForConditionalGenerationModelTest::test_flash_attn_2_inference_equivalence_right_padding"
+ ]
+ },
+ "llava_next_video": {
+ "single-gpu": [
+ "tests/models/llava_next_video/test_modeling_llava_next_video.py::LlavaNextVideoForConditionalGenerationModelTest::test_flash_attn_2_inference_equivalence",
+ "tests/models/llava_next_video/test_modeling_llava_next_video.py::LlavaNextVideoForConditionalGenerationModelTest::test_flash_attn_2_inference_equivalence_right_padding"
+ ],
+ "multi-gpu": [
+ "tests/models/llava_next_video/test_modeling_llava_next_video.py::LlavaNextVideoForConditionalGenerationModelTest::test_flash_attn_2_inference_equivalence",
+ "tests/models/llava_next_video/test_modeling_llava_next_video.py::LlavaNextVideoForConditionalGenerationModelTest::test_flash_attn_2_inference_equivalence_right_padding"
+ ]
+ },
+ "llava_onevision": {
+ "single-gpu": [
+ "tests/models/llava_onevision/test_modeling_llava_onevision.py::LlavaOnevisionForConditionalGenerationModelTest::test_flash_attn_2_inference_equivalence",
+ "tests/models/llava_onevision/test_modeling_llava_onevision.py::LlavaOnevisionForConditionalGenerationModelTest::test_flash_attn_2_inference_equivalence_right_padding"
+ ],
+ "multi-gpu": [
+ "tests/models/llava_onevision/test_modeling_llava_onevision.py::LlavaOnevisionForConditionalGenerationModelTest::test_flash_attn_2_inference_equivalence",
+ "tests/models/llava_onevision/test_modeling_llava_onevision.py::LlavaOnevisionForConditionalGenerationModelTest::test_flash_attn_2_inference_equivalence_right_padding"
+ ]
+ },
+ "mask2former": {
+ "single-gpu": [
+ "tests/models/mask2former/test_modeling_mask2former.py::Mask2FormerModelTest::test_flash_attn_2_inference_equivalence",
+ "tests/models/mask2former/test_modeling_mask2former.py::Mask2FormerModelTest::test_flash_attn_2_inference_equivalence_right_padding"
+ ],
+ "multi-gpu": [
+ "tests/models/mask2former/test_modeling_mask2former.py::Mask2FormerModelTest::test_flash_attn_2_inference_equivalence",
+ "tests/models/mask2former/test_modeling_mask2former.py::Mask2FormerModelTest::test_flash_attn_2_inference_equivalence_right_padding"
+ ]
+ },
+ "maskformer": {
+ "single-gpu": [
+ "tests/models/maskformer/test_modeling_maskformer.py::MaskFormerModelTest::test_flash_attn_2_inference_equivalence",
+ "tests/models/maskformer/test_modeling_maskformer.py::MaskFormerModelTest::test_flash_attn_2_inference_equivalence_right_padding",
+ "tests/models/maskformer/test_modeling_maskformer_swin.py::MaskFormerSwinModelTest::test_flash_attn_2_inference_equivalence",
+ "tests/models/maskformer/test_modeling_maskformer_swin.py::MaskFormerSwinModelTest::test_flash_attn_2_inference_equivalence_right_padding"
+ ],
+ "multi-gpu": [
+ "tests/models/maskformer/test_modeling_maskformer.py::MaskFormerModelTest::test_flash_attn_2_inference_equivalence",
+ "tests/models/maskformer/test_modeling_maskformer.py::MaskFormerModelTest::test_flash_attn_2_inference_equivalence_right_padding",
+ "tests/models/maskformer/test_modeling_maskformer_swin.py::MaskFormerSwinModelTest::test_flash_attn_2_inference_equivalence",
+ "tests/models/maskformer/test_modeling_maskformer_swin.py::MaskFormerSwinModelTest::test_flash_attn_2_inference_equivalence_right_padding"
+ ]
+ },
+ "mixtral": {
+ "single-gpu": [],
+ "multi-gpu": [
+ "tests/models/mixtral/test_modeling_mixtral.py::MixtralModelTest::test_flash_attn_2_equivalence"
+ ]
+ },
+ "mllama": {
+ "single-gpu": [
+ "tests/models/mllama/test_modeling_mllama.py::MllamaForConditionalGenerationModelTest::test_eager_matches_fa2_generate",
+ "tests/models/mllama/test_modeling_mllama.py::MllamaForConditionalGenerationModelTest::test_flash_attn_2_inference_equivalence",
+ "tests/models/mllama/test_modeling_mllama.py::MllamaForConditionalGenerationModelTest::test_flash_attn_2_inference_equivalence_right_padding"
+ ],
+ "multi-gpu": [
+ "tests/models/mllama/test_modeling_mllama.py::MllamaForConditionalGenerationModelTest::test_eager_matches_fa2_generate",
+ "tests/models/mllama/test_modeling_mllama.py::MllamaForConditionalGenerationModelTest::test_flash_attn_2_inference_equivalence",
+ "tests/models/mllama/test_modeling_mllama.py::MllamaForConditionalGenerationModelTest::test_flash_attn_2_inference_equivalence_right_padding",
+ "tests/models/mllama/test_modeling_mllama.py::MllamaForCausalLMModelTest::test_flash_attention_2_padding_matches_padding_free_with_position_ids"
+ ]
+ },
+ "mm_grounding_dino": {
+ "single-gpu": [
+ "tests/models/mm_grounding_dino/test_modeling_mm_grounding_dino.py::MMGroundingDinoModelTest::test_flash_attn_2_inference_equivalence",
+ "tests/models/mm_grounding_dino/test_modeling_mm_grounding_dino.py::MMGroundingDinoModelTest::test_flash_attn_2_inference_equivalence_right_padding"
+ ],
+ "multi-gpu": [
+ "tests/models/mm_grounding_dino/test_modeling_mm_grounding_dino.py::MMGroundingDinoModelTest::test_flash_attn_2_inference_equivalence",
+ "tests/models/mm_grounding_dino/test_modeling_mm_grounding_dino.py::MMGroundingDinoModelTest::test_flash_attn_2_inference_equivalence_right_padding"
+ ]
+ },
+ "modernbert": {
+ "single-gpu": [
+ "tests/models/modernbert/test_modeling_modernbert.py::ModernBertModelTest::test_flash_attn_2_inference_equivalence"
+ ],
+ "multi-gpu": [
+ "tests/models/modernbert/test_modeling_modernbert.py::ModernBertModelTest::test_flash_attn_2_inference_equivalence"
+ ]
+ },
+ "moshi": {
+ "single-gpu": [
+ "tests/models/moshi/test_modeling_moshi.py::MoshiDecoderTest::test_flash_attn_2_inference_equivalence",
+ "tests/models/moshi/test_modeling_moshi.py::MoshiDecoderTest::test_flash_attn_2_inference_equivalence_right_padding",
+ "tests/models/moshi/test_modeling_moshi.py::MoshiTest::test_eager_matches_fa2_generate",
+ "tests/models/moshi/test_modeling_moshi.py::MoshiTest::test_flash_attn_2_fp32_ln",
+ "tests/models/moshi/test_modeling_moshi.py::MoshiTest::test_flash_attn_2_from_config",
+ "tests/models/moshi/test_modeling_moshi.py::MoshiTest::test_flash_attn_2_inference_equivalence",
+ "tests/models/moshi/test_modeling_moshi.py::MoshiTest::test_flash_attn_2_inference_equivalence_right_padding"
+ ],
+ "multi-gpu": [
+ "tests/models/moshi/test_modeling_moshi.py::MoshiDecoderTest::test_flash_attn_2_inference_equivalence",
+ "tests/models/moshi/test_modeling_moshi.py::MoshiDecoderTest::test_flash_attn_2_inference_equivalence_right_padding",
+ "tests/models/moshi/test_modeling_moshi.py::MoshiTest::test_eager_matches_fa2_generate",
+ "tests/models/moshi/test_modeling_moshi.py::MoshiTest::test_flash_attn_2_fp32_ln",
+ "tests/models/moshi/test_modeling_moshi.py::MoshiTest::test_flash_attn_2_from_config",
+ "tests/models/moshi/test_modeling_moshi.py::MoshiTest::test_flash_attn_2_inference_equivalence",
+ "tests/models/moshi/test_modeling_moshi.py::MoshiTest::test_flash_attn_2_inference_equivalence_right_padding"
+ ]
+ },
+ "nemotron": {
+ "single-gpu": [
+ "tests/models/nemotron/test_modeling_nemotron.py::NemotronModelTest::test_flash_attn_2_equivalence"
+ ],
+ "multi-gpu": [
+ "tests/models/nemotron/test_modeling_nemotron.py::NemotronModelTest::test_flash_attn_2_equivalence"
+ ]
+ },
+ "olmo": {
+ "single-gpu": [
+ "tests/models/olmo/test_modeling_olmo.py::OlmoModelTest::test_flash_attention_2_padding_matches_padding_free_with_position_ids"
+ ],
+ "multi-gpu": [
+ "tests/models/olmo/test_modeling_olmo.py::OlmoModelTest::test_flash_attention_2_padding_matches_padding_free_with_position_ids"
+ ]
+ },
+ "olmo2": {
+ "single-gpu": [
+ "tests/models/olmo2/test_modeling_olmo2.py::Olmo2ModelTest::test_flash_attention_2_padding_matches_padding_free_with_position_ids"
+ ],
+ "multi-gpu": [
+ "tests/models/olmo2/test_modeling_olmo2.py::Olmo2ModelTest::test_flash_attention_2_padding_matches_padding_free_with_position_ids"
+ ]
+ },
+ "omdet_turbo": {
+ "single-gpu": [
+ "tests/models/omdet_turbo/test_modeling_omdet_turbo.py::OmDetTurboModelTest::test_flash_attn_2_inference_equivalence",
+ "tests/models/omdet_turbo/test_modeling_omdet_turbo.py::OmDetTurboModelTest::test_flash_attn_2_inference_equivalence_right_padding"
+ ],
+ "multi-gpu": [
+ "tests/models/omdet_turbo/test_modeling_omdet_turbo.py::OmDetTurboModelTest::test_flash_attn_2_inference_equivalence",
+ "tests/models/omdet_turbo/test_modeling_omdet_turbo.py::OmDetTurboModelTest::test_flash_attn_2_inference_equivalence_right_padding"
+ ]
+ },
+ "oneformer": {
+ "single-gpu": [
+ "tests/models/oneformer/test_modeling_oneformer.py::OneFormerModelTest::test_flash_attn_2_inference_equivalence",
+ "tests/models/oneformer/test_modeling_oneformer.py::OneFormerModelTest::test_flash_attn_2_inference_equivalence_right_padding"
+ ],
+ "multi-gpu": [
+ "tests/models/oneformer/test_modeling_oneformer.py::OneFormerModelTest::test_flash_attn_2_inference_equivalence",
+ "tests/models/oneformer/test_modeling_oneformer.py::OneFormerModelTest::test_flash_attn_2_inference_equivalence_right_padding"
+ ]
+ },
+ "paligemma": {
+ "single-gpu": [
+ "tests/models/paligemma/test_modeling_paligemma.py::PaliGemmaForConditionalGenerationModelTest::test_flash_attn_2_from_config",
+ "tests/models/paligemma/test_modeling_paligemma.py::PaliGemmaForConditionalGenerationModelTest::test_flash_attn_2_inference_equivalence",
+ "tests/models/paligemma/test_modeling_paligemma.py::PaliGemmaForConditionalGenerationModelTest::test_flash_attn_2_inference_equivalence_right_padding"
+ ],
+ "multi-gpu": [
+ "tests/models/paligemma/test_modeling_paligemma.py::PaliGemmaForConditionalGenerationModelTest::test_flash_attn_2_from_config",
+ "tests/models/paligemma/test_modeling_paligemma.py::PaliGemmaForConditionalGenerationModelTest::test_flash_attn_2_inference_equivalence",
+ "tests/models/paligemma/test_modeling_paligemma.py::PaliGemmaForConditionalGenerationModelTest::test_flash_attn_2_inference_equivalence_right_padding"
+ ]
+ },
+ "paligemma2": {
+ "single-gpu": [
+ "tests/models/paligemma2/test_modeling_paligemma2.py::PaliGemma2ForConditionalGenerationModelTest::test_flash_attn_2_from_config",
+ "tests/models/paligemma2/test_modeling_paligemma2.py::PaliGemma2ForConditionalGenerationModelTest::test_flash_attn_2_inference_equivalence",
+ "tests/models/paligemma2/test_modeling_paligemma2.py::PaliGemma2ForConditionalGenerationModelTest::test_flash_attn_2_inference_equivalence_right_padding"
+ ],
+ "multi-gpu": [
+ "tests/models/paligemma2/test_modeling_paligemma2.py::PaliGemma2ForConditionalGenerationModelTest::test_flash_attn_2_from_config",
+ "tests/models/paligemma2/test_modeling_paligemma2.py::PaliGemma2ForConditionalGenerationModelTest::test_flash_attn_2_inference_equivalence",
+ "tests/models/paligemma2/test_modeling_paligemma2.py::PaliGemma2ForConditionalGenerationModelTest::test_flash_attn_2_inference_equivalence_right_padding"
+ ]
+ },
+ "pegasus_x": {
+ "single-gpu": [
+ "tests/models/pegasus_x/test_modeling_pegasus_x.py::PegasusXModelTest::test_flash_attn_2_inference_equivalence",
+ "tests/models/pegasus_x/test_modeling_pegasus_x.py::PegasusXModelTest::test_flash_attn_2_inference_equivalence_right_padding",
+ "tests/models/pegasus_x/test_modeling_pegasus_x.py::PegasusXStandaloneDecoderModelTest::test_flash_attn_2_inference_equivalence",
+ "tests/models/pegasus_x/test_modeling_pegasus_x.py::PegasusXStandaloneDecoderModelTest::test_flash_attn_2_inference_equivalence_right_padding"
+ ],
+ "multi-gpu": [
+ "tests/models/pegasus_x/test_modeling_pegasus_x.py::PegasusXModelTest::test_flash_attn_2_inference_equivalence",
+ "tests/models/pegasus_x/test_modeling_pegasus_x.py::PegasusXModelTest::test_flash_attn_2_inference_equivalence_right_padding",
+ "tests/models/pegasus_x/test_modeling_pegasus_x.py::PegasusXStandaloneDecoderModelTest::test_flash_attn_2_inference_equivalence",
+ "tests/models/pegasus_x/test_modeling_pegasus_x.py::PegasusXStandaloneDecoderModelTest::test_flash_attn_2_inference_equivalence_right_padding"
+ ]
+ },
+ "perception_lm": {
+ "single-gpu": [
+ "tests/models/perception_lm/test_modeling_perception_lm.py::PerceptionLMForConditionalGenerationModelTest::test_flash_attention_2_continue_generate_with_position_ids"
+ ],
+ "multi-gpu": [
+ "tests/models/perception_lm/test_modeling_perception_lm.py::PerceptionLMForConditionalGenerationModelTest::test_flash_attention_2_continue_generate_with_position_ids"
+ ]
+ },
+ "phi": {
+ "single-gpu": [
+ "tests/models/phi/test_modeling_phi.py::PhiModelTest::test_flash_attention_2_padding_matches_padding_free_with_position_ids"
+ ],
+ "multi-gpu": [
+ "tests/models/phi/test_modeling_phi.py::PhiModelTest::test_flash_attention_2_padding_matches_padding_free_with_position_ids"
+ ]
+ },
+ "phimoe": {
+ "single-gpu": [
+ "tests/models/phimoe/test_modeling_phimoe.py::PhimoeModelTest::test_flash_attn_2_equivalence"
+ ],
+ "multi-gpu": [
+ "tests/models/phimoe/test_modeling_phimoe.py::PhimoeModelTest::test_flash_attn_2_equivalence",
+ "tests/models/phimoe/test_modeling_phimoe.py::PhimoeModelTest::test_flash_attention_2_padding_matches_padding_free_with_position_ids_and_fa_kwargs"
+ ]
+ },
+ "pixtral": {
+ "single-gpu": [
+ "tests/models/pixtral/test_modeling_pixtral.py::PixtralVisionModelModelTest::test_flash_attn_2_inference_equivalence",
+ "tests/models/pixtral/test_modeling_pixtral.py::PixtralVisionModelModelTest::test_flash_attn_2_inference_equivalence_right_padding"
+ ],
+ "multi-gpu": [
+ "tests/models/pixtral/test_modeling_pixtral.py::PixtralVisionModelModelTest::test_flash_attn_2_inference_equivalence",
+ "tests/models/pixtral/test_modeling_pixtral.py::PixtralVisionModelModelTest::test_flash_attn_2_inference_equivalence_right_padding"
+ ]
+ },
+ "qwen2_5_vl": {
+ "single-gpu": [
+ "tests/models/qwen2_5_vl/test_modeling_qwen2_5_vl.py::Qwen2_5_VLIntegrationTest::test_small_model_integration_test_batch_wo_image_flashatt2"
+ ],
+ "multi-gpu": [
+ "tests/models/qwen2_5_vl/test_modeling_qwen2_5_vl.py::Qwen2_5_VLIntegrationTest::test_small_model_integration_test_batch_wo_image_flashatt2"
+ ]
+ },
+ "qwen3_omni_moe": {
+ "single-gpu": [
+ "tests/models/qwen3_omni_moe/test_modeling_qwen3_omni_moe.py::Qwen2_5OmniThinkerForConditionalGenerationModelTest::test_flash_attention_2_padding_matches_padding_free_with_position_ids",
+ "tests/models/qwen3_omni_moe/test_modeling_qwen3_omni_moe.py::Qwen2_5OmniThinkerForConditionalGenerationModelTest::test_flash_attention_2_padding_matches_padding_free_with_position_ids_and_fa_kwargs",
+ "tests/models/qwen3_omni_moe/test_modeling_qwen3_omni_moe.py::Qwen2_5OmniModelIntegrationTest::test_small_model_integration_test_batch_flashatt2"
+ ],
+ "multi-gpu": [
+ "tests/models/qwen3_omni_moe/test_modeling_qwen3_omni_moe.py::Qwen2_5OmniThinkerForConditionalGenerationModelTest::test_flash_attention_2_padding_matches_padding_free_with_position_ids",
+ "tests/models/qwen3_omni_moe/test_modeling_qwen3_omni_moe.py::Qwen2_5OmniThinkerForConditionalGenerationModelTest::test_flash_attention_2_padding_matches_padding_free_with_position_ids_and_fa_kwargs",
+ "tests/models/qwen3_omni_moe/test_modeling_qwen3_omni_moe.py::Qwen2_5OmniModelIntegrationTest::test_small_model_integration_test_batch_flashatt2"
+ ]
+ },
+ "roberta_prelayernorm": {
+ "single-gpu": [
+ "tests/models/roberta_prelayernorm/test_modeling_roberta_prelayernorm.py::RobertaPreLayerNormModelTest::test_flash_attn_2_inference_equivalence_right_padding"
+ ],
+ "multi-gpu": []
+ },
+ "sam2": {
+ "single-gpu": [
+ "tests/models/sam2/test_modeling_sam2.py::Sam2ModelTest::test_flash_attn_2_can_dispatch_composite_models"
+ ],
+ "multi-gpu": [
+ "tests/models/sam2/test_modeling_sam2.py::Sam2ModelTest::test_flash_attn_2_can_dispatch_composite_models"
+ ]
+ },
+ "smollm3": {
+ "single-gpu": [
+ "tests/models/smollm3/test_modeling_smollm3.py::SmolLM3IntegrationTest::test_model_3b_long_prompt"
+ ],
+ "multi-gpu": [
+ "tests/models/smollm3/test_modeling_smollm3.py::SmolLM3IntegrationTest::test_model_3b_long_prompt"
+ ]
+ },
+ "squeezebert": {
+ "single-gpu": [
+ "tests/models/squeezebert/test_modeling_squeezebert.py::SqueezeBertModelTest::test_flash_attn_2_inference_equivalence",
+ "tests/models/squeezebert/test_modeling_squeezebert.py::SqueezeBertModelTest::test_flash_attn_2_inference_equivalence_right_padding"
+ ],
+ "multi-gpu": [
+ "tests/models/squeezebert/test_modeling_squeezebert.py::SqueezeBertModelTest::test_flash_attn_2_inference_equivalence",
+ "tests/models/squeezebert/test_modeling_squeezebert.py::SqueezeBertModelTest::test_flash_attn_2_inference_equivalence_right_padding"
+ ]
+ },
+ "starcoder2": {
+ "single-gpu": [
+ "tests/models/starcoder2/test_modeling_starcoder2.py::Starcoder2IntegrationTest::test_starcoder2_batched_generation_fa2"
+ ],
+ "multi-gpu": [
+ "tests/models/starcoder2/test_modeling_starcoder2.py::Starcoder2IntegrationTest::test_starcoder2_batched_generation_fa2"
+ ]
+ },
+ "swin": {
+ "single-gpu": [
+ "tests/models/swin/test_modeling_swin.py::SwinModelTest::test_flash_attn_2_inference_equivalence",
+ "tests/models/swin/test_modeling_swin.py::SwinModelTest::test_flash_attn_2_inference_equivalence_right_padding"
+ ],
+ "multi-gpu": [
+ "tests/models/swin/test_modeling_swin.py::SwinModelTest::test_flash_attn_2_inference_equivalence",
+ "tests/models/swin/test_modeling_swin.py::SwinModelTest::test_flash_attn_2_inference_equivalence_right_padding"
+ ]
+ },
+ "swin2sr": {
+ "single-gpu": [
+ "tests/models/swin2sr/test_modeling_swin2sr.py::Swin2SRModelTest::test_flash_attn_2_inference_equivalence",
+ "tests/models/swin2sr/test_modeling_swin2sr.py::Swin2SRModelTest::test_flash_attn_2_inference_equivalence_right_padding"
+ ],
+ "multi-gpu": [
+ "tests/models/swin2sr/test_modeling_swin2sr.py::Swin2SRModelTest::test_flash_attn_2_inference_equivalence",
+ "tests/models/swin2sr/test_modeling_swin2sr.py::Swin2SRModelTest::test_flash_attn_2_inference_equivalence_right_padding"
+ ]
+ },
+ "swinv2": {
+ "single-gpu": [
+ "tests/models/swinv2/test_modeling_swinv2.py::Swinv2ModelTest::test_flash_attn_2_inference_equivalence",
+ "tests/models/swinv2/test_modeling_swinv2.py::Swinv2ModelTest::test_flash_attn_2_inference_equivalence_right_padding"
+ ],
+ "multi-gpu": [
+ "tests/models/swinv2/test_modeling_swinv2.py::Swinv2ModelTest::test_flash_attn_2_inference_equivalence",
+ "tests/models/swinv2/test_modeling_swinv2.py::Swinv2ModelTest::test_flash_attn_2_inference_equivalence_right_padding"
+ ]
+ },
+ "t5gemma": {
+ "single-gpu": [
+ "tests/models/t5gemma/test_modeling_t5gemma.py::T5GemmaModelTest::test_flash_attn_2_can_compile_with_attention_mask_None_without_graph_break"
+ ],
+ "multi-gpu": [
+ "tests/models/t5gemma/test_modeling_t5gemma.py::T5GemmaModelTest::test_flash_attn_2_can_compile_with_attention_mask_None_without_graph_break"
+ ]
+ },
+ "zamba": {
+ "single-gpu": [
+ "tests/models/zamba/test_modeling_zamba.py::ZambaModelTest::test_flash_attn_2_fp32_ln"
+ ],
+ "multi-gpu": [
+ "tests/models/zamba/test_modeling_zamba.py::ZambaModelTest::test_flash_attn_2_fp32_ln"
+ ]
+ }
+ }
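
The report maps each model name to the pytest node IDs that newly failed on that day's run, split by machine type ("single-gpu" and "multi-gpu"). A minimal sketch of how the file could be consumed once downloaded locally; the local filename new_failures.json is an assumption:

import json
from collections import Counter

# Load the report: {model: {"single-gpu": [test ids], "multi-gpu": [test ids]}}
with open("new_failures.json") as f:
    report = json.load(f)

totals = Counter()
for model, by_machine in report.items():
    for machine, tests in by_machine.items():
        totals[machine] += len(tests)
        if tests:
            print(f"{model} [{machine}]: {len(tests)} new failure(s)")

print("total new failures:", dict(totals))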