Skip to content

Commit c154b02

Browse files
[CI] Fixing some AMD failures (#42879)
* fix qwen2 & qwen2_5_omni
* one more fix
* fix qwen2_5_vl
* fix
* fix some more failures on nvidia
1 parent 491e0cd commit c154b02

File tree

5 files changed

+22
-18
lines changed

5 files changed

+22
-18
lines changed

src/transformers/models/qwen2_5_omni/modeling_qwen2_5_omni.py

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2459,7 +2459,11 @@ def forward(
24592459
self.rope_deltas = rope_deltas
24602460

24612461
else:
2462-
batch_size, seq_length, _ = inputs_embeds.shape
2462+
if inputs_embeds is not None:
2463+
batch_size, seq_length, _ = inputs_embeds.shape
2464+
else:
2465+
batch_size, seq_length = input_ids.shape
2466+
24632467
delta = (past_key_values_length + self.rope_deltas).to(input_ids.device)
24642468
position_ids = torch.arange(seq_length, device=input_ids.device)
24652469
position_ids = position_ids.view(1, -1).expand(batch_size, -1)

src/transformers/models/qwen2_5_omni/modular_qwen2_5_omni.py

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2615,7 +2615,11 @@ def forward(
26152615
self.rope_deltas = rope_deltas
26162616

26172617
else:
2618-
batch_size, seq_length, _ = inputs_embeds.shape
2618+
if inputs_embeds is not None:
2619+
batch_size, seq_length, _ = inputs_embeds.shape
2620+
else:
2621+
batch_size, seq_length = input_ids.shape
2622+
26192623
delta = (past_key_values_length + self.rope_deltas).to(input_ids.device)
26202624
position_ids = torch.arange(seq_length, device=input_ids.device)
26212625
position_ids = position_ids.view(1, -1).expand(batch_size, -1)

tests/models/qwen2/test_modeling_qwen2.py

Lines changed: 5 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -78,11 +78,10 @@ def test_model_450m_logits(self):
7878
with torch.no_grad():
7979
out = model(input_ids).logits.float().cpu()
8080
# Expected mean on dim = -1
81-
EXPECTED_MEAN = torch.tensor([[-1.9537, -1.6193, -1.4123, -1.4673, -1.8511, -1.9309, -1.9826, -2.1776]])
81+
EXPECTED_MEAN = torch.tensor([[-2.2121, -1.6335, -1.4816, -1.5035, -1.9110, -1.8979, -1.9682, -2.1980]])
8282
torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, rtol=1e-2, atol=1e-2)
8383
# slicing logits[0, 0, 0:30]
84-
EXPECTED_SLICE = torch.tensor([3.2025, 7.1265, 4.6058, 3.6423, 1.6357, 3.9265, 5.1883, 5.8760, 2.7942, 4.4823, 3.2571, 2.1063, 3.4275, 4.2028, 1.9767, 5.2115, 6.6756, 6.3999, 6.0483, 5.7378, 5.6660, 5.2298, 5.4103, 5.1248, 5.4376, 2.4570, 2.6107, 5.4039, 2.8077, 4.7777]) # fmt: skip
85-
print(out[0, 0, :30])
84+
EXPECTED_SLICE = torch.tensor([2.7344, 4.2812, 4.1562, 2.3906, 1.1875, 2.1562, 3.1719, 3.1406, 1.2891, 3.6094, 3.3125, 1.8203, 2.9219, 3.2344, 1.5938, 6.2500, 7.4062, 7.2188, 6.5938, 6.0312, 6.1562, 5.3750, 5.9688, 5.5938, 6.1250, 1.2656, 1.6016, 3.4062, 1.7891, 3.6406]) # fmt: skip
8685
torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, rtol=1e-4, atol=1e-4)
8786

8887
del model
@@ -92,7 +91,7 @@ def test_model_450m_logits(self):
9291
@slow
9392
def test_model_450m_generation(self):
9493
EXPECTED_TEXT_COMPLETION = (
95-
"""My favourite condiment is 100% natural, organic and vegan. I love to use it in my cooking and I"""
94+
"""My favourite condiment is 100% natural, organic and vegan. I love to use it in my cooking, but"""
9695
)
9796
prompt = "My favourite condiment is "
9897
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2-0.5B", use_fast=False)
@@ -161,7 +160,7 @@ def test_model_450m_long_prompt_sdpa(self):
161160
gc.collect()
162161

163162
EXPECTED_TEXT_COMPLETION = (
164-
"My favourite condiment is 100% natural, organic and vegan. I love to use it in my cooking and I"
163+
"My favourite condiment is 100% natural, organic and vegan. I love to use it in my cooking, but"
165164
)
166165
prompt = "My favourite condiment is "
167166
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2-0.5B", use_fast=False)
@@ -211,11 +210,8 @@ def test_export_static_cache(self):
211210
tokenizer = AutoTokenizer.from_pretrained(qwen_model, pad_token="</s>", padding_side="right")
212211

213212
expected_text_completions = Expectations({
214-
("cuda", None): [
215-
"My favourite condiment is 100% natural, organic, gluten free, vegan, and free from preservatives. I"
216-
],
217213
("cuda", 8): [
218-
"My favourite condiment is 100% natural, organic, gluten free, vegan, and vegetarian. I love to use"
214+
"My favourite condiment is 100% natural, organic, gluten free, vegan, and free from preservatives. I"
219215
],
220216
("rocm", (9, 4)): [
221217
"My favourite condiment is 100% natural, organic and vegan. I love to use it in my cooking, but"

tests/models/qwen2_5_omni/test_modeling_qwen2_5_omni.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -684,7 +684,7 @@ def test_small_model_integration_test(self):
684684

685685
EXPECTED_DECODED_TEXT = Expectations({
686686
("xpu", None): "system\nYou are a helpful assistant.\nuser\nWhat's that sound and what kind of dog is this?\nassistant\nThe sound is glass shattering, and the dog is a Labrador Retriever.",
687-
("cuda", (8, 6)): "system\nYou are a helpful assistant.\nuser\nWhat's that sound and what kind of dog is this?\nassistant\nThe sound is glass shattering, and the dog is a Labrador Retriever.",
687+
("cuda", (8, 6)): "system\nYou are a helpful assistant.\nuser\nWhat's that sound and what kind of dog is this?\nassistant\nThe sound is a glass shattering. The dog in the picture is a Labrador Retriever.",
688688
("rocm", (9, 4)): "system\nYou are a helpful assistant.\nuser\nWhat's that sound and what kind of dog is this?\nassistant\nThe sound is glass shattering, and the dog is a Labrador Retriever.",
689689
}).get_expectation() # fmt: skip
690690

@@ -720,11 +720,11 @@ def test_small_model_integration_test_batch(self):
720720
"system\nYou are a helpful assistant.\nuser\nWhat's that sound and what kind of dog is this?\nassistant\nThe sound is of glass shattering, and the dog in the picture is a Labrador Retriever",
721721
],
722722
("cuda", 8): [
723-
"system\nYou are a helpful assistant.\nuser\nWhat's that sound and what kind of dog is this?\nassistant\nThe sound is glass shattering, and the dog is a Labrador Retriever.",
724-
"system\nYou are a helpful assistant.\nuser\nWhat's that sound and what kind of dog is this?\nassistant\nThe sound is glass shattering, and the dog is a Labrador Retriever.",
723+
"system\nYou are a helpful assistant.\nuser\nWhat's that sound and what kind of dog is this?\nassistant\nThe sound is a glass shattering. The dog in the picture is a Labrador Retriever.",
724+
"system\nYou are a helpful assistant.\nuser\nWhat's that sound and what kind of dog is this?\nassistant\nThe sound is a glass shattering. The dog in the picture is a Labrador Retriever.",
725725
],
726726
("rocm", (9, 4)): [
727-
"system\nYou are a helpful assistant.\nuser\nWhat's that sound and what kind of dog is this?\nassistant\nThe sound is glass shattering, and the dog is a Labrador Retriever.",
727+
"system\nYou are a helpful assistant.\nuser\nWhat's that sound and what kind of dog is this?\nassistant\nThe sound is a glass shattering. The dog in the picture is a Labrador Retriever.",
728728
"system\nYou are a helpful assistant.\nuser\nWhat's that sound and what kind of dog is this?\nassistant\nThe sound is glass shattering, and the dog is a Labrador Retriever.",
729729
],
730730
}

tests/models/qwen2_5_vl/test_modeling_qwen2_5_vl.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -572,8 +572,8 @@ def test_small_model_integration_test_batch_different_resolutions(self):
572572
"system\nYou are a helpful assistant.\nuser\nWhat kind of dog is this?\nassistant\n addCriterion\nThe dog in the picture appears to be a Labrador Retriever. Labradors are known for their friendly and gentle nature, which is",
573573
],
574574
("cuda", (8, 6)): [
575-
'system\nYou are a helpful assistant.\nuser\nWhat kind of dog is this?\nassistant\nThe dog in the picture appears to be a Labrador Retriever. Labradors are known for their friendly and energetic nature, which is evident in',
576-
'system\nYou are a helpful assistant.\nuser\nWhat kind of dog is this?\nassistant\nThe dog in the picture appears to be a Labrador Retriever. Labradors are known for their friendly and energetic nature, which is evident in',
575+
'system\nYou are a helpful assistant.\nuser\nWhat kind of dog is this?\nassistant\n addCriterion\nThe dog in the picture appears to be a Labrador Retriever. Labradors are known for their friendly and gentle nature, which is',
576+
'system\nYou are a helpful assistant.\nuser\nWhat kind of dog is this?\nassistant\n addCriterion\nThe dog in the picture appears to be a Labrador Retriever. Labradors are known for their friendly and gentle nature, which is',
577577
],
578578
("rocm", None): [
579579
'system\nYou are a helpful assistant.\nuser\nWhat kind of dog is this?\nassistant\nThe dog in the picture appears to be a Labrador Retriever. Labradors are known for their friendly and energetic nature, which is evident in',
@@ -706,7 +706,7 @@ def test_small_model_integration_test_with_video(self):
706706
output = model.generate(**inputs, max_new_tokens=30)
707707

708708
EXPECTED_DECODED_TEXT = [
709-
'system\nYou are a helpful assistant.\nuser\nWhat is shown in this video?\nassistant\nThe video shows an indoor tennis court with a person standing on one side, preparing to serve the ball. The individual is dressed in athletic attire, including',
709+
'system\nYou are a helpful assistant.\nuser\nWhat is shown in this video?\nassistant\nThe video shows an indoor tennis court with a player standing on the baseline, preparing to serve. The player is wearing a white shirt and black shorts,',
710710
] # fmt: skip
711711
self.assertEqual(
712712
self.processor.batch_decode(output, skip_special_tokens=True),

0 commit comments

Comments (0)