
Commit a773423

Generation tests: update imagegpt input name, remove unused functions (#33663)
1 parent 6f7d750

18 files changed: +23, -656 lines

src/transformers/models/imagegpt/modeling_imagegpt.py

Lines changed: 3 additions & 6 deletions
@@ -690,8 +690,7 @@ def forward(
 
         if "pixel_values" in kwargs:
             warnings.warn(
-                "The `pixel_values` argument is deprecated and will be removed in a future version, use `input_ids`"
-                " instead.",
+                "The `pixel_values` argument is deprecated and will be removed in v4.47, use `input_ids` instead.",
                 FutureWarning,
             )
 
@@ -1004,8 +1003,7 @@ def forward(
 
         if "pixel_values" in kwargs:
             warnings.warn(
-                "The `pixel_values` argument is deprecated and will be removed in a future version, use `input_ids`"
-                " instead.",
+                "The `pixel_values` argument is deprecated and will be removed in v4.47, use `input_ids` instead.",
                 FutureWarning,
             )
 
@@ -1137,8 +1135,7 @@ def forward(
 
         if "pixel_values" in kwargs:
             warnings.warn(
-                "The `pixel_values` argument is deprecated and will be removed in a future version, use `input_ids`"
-                " instead.",
+                "The `pixel_values` argument is deprecated and will be removed in v4.47, use `input_ids` instead.",
                 FutureWarning,
             )

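For context, these hunks sit inside a kwarg-deprecation pattern. Below is a minimal sketch of that pattern, assuming a hypothetical stripped-down `forward` rather than ImageGPT's real signature: the legacy `pixel_values` keyword is accepted, a `FutureWarning` is emitted, and the value is remapped to `input_ids`.

import warnings

def forward(input_ids=None, **kwargs):
    # Hypothetical reduction of the pattern in modeling_imagegpt.py: accept
    # the legacy `pixel_values` kwarg, warn, and treat it as `input_ids`.
    if "pixel_values" in kwargs:
        warnings.warn(
            "The `pixel_values` argument is deprecated and will be removed in v4.47, use `input_ids` instead.",
            FutureWarning,
        )
        input_ids = kwargs.pop("pixel_values")
    return input_ids

forward(pixel_values=[1, 2, 3])  # warns, then behaves like forward(input_ids=[1, 2, 3])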
tests/generation/test_utils.py

Lines changed: 0 additions & 20 deletions
@@ -190,26 +190,6 @@ def _get_constrained_beam_kwargs(self, num_return_sequences=1):
         }
         return beam_kwargs
 
-    @staticmethod
-    def _get_encoder_outputs(
-        model, input_ids, attention_mask, output_attentions=None, output_hidden_states=None, num_interleave=1
-    ):
-        encoder = model.get_encoder()
-        encoder_outputs = encoder(
-            input_ids,
-            attention_mask=attention_mask,
-            output_attentions=output_attentions,
-            output_hidden_states=output_hidden_states,
-        )
-        encoder_outputs["last_hidden_state"] = encoder_outputs.last_hidden_state.repeat_interleave(
-            num_interleave, dim=0
-        )
-        generation_config = copy.deepcopy(model.generation_config)
-        model._prepare_special_tokens(generation_config)
-        input_ids = torch.zeros_like(input_ids[:, :1]) + generation_config.decoder_start_token_id
-        attention_mask = None
-        return encoder_outputs, input_ids, attention_mask
-
     def _greedy_generate(
         self,
         model,

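The removed `_get_encoder_outputs` helper expanded the encoder's `last_hidden_state` with `repeat_interleave` so that each input row appeared once per beam or return sequence, then reset `input_ids` to a single `decoder_start_token_id` column. A standalone illustration of the expansion step (plain tensors, no model involved):

import torch

# Shape (batch=2, seq=1, hidden=2); repeating 3x along dim 0 yields rows in
# the order 0,0,0,1,1,1: one copy of each encoder state per decoder row.
last_hidden_state = torch.arange(4.0).reshape(2, 1, 2)
expanded = last_hidden_state.repeat_interleave(3, dim=0)
print(expanded.shape)  # torch.Size([6, 1, 2])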
tests/models/codegen/test_modeling_codegen.py

Lines changed: 1 addition & 30 deletions
@@ -23,7 +23,7 @@
 
 from ...generation.test_utils import GenerationTesterMixin
 from ...test_configuration_common import ConfigTester
-from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
+from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
 from ...test_pipeline_mixin import PipelineTesterMixin
 
 
@@ -150,35 +150,6 @@ def get_config(self):
             rotary_dim=self.rotary_dim,
         )
 
-    def prepare_config_and_inputs_for_decoder(self):
-        (
-            config,
-            input_ids,
-            input_mask,
-            head_mask,
-            token_type_ids,
-            mc_token_ids,
-            sequence_labels,
-            token_labels,
-            choice_labels,
-        ) = self.prepare_config_and_inputs()
-
-        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
-        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
-
-        return (
-            config,
-            input_ids,
-            input_mask,
-            head_mask,
-            token_type_ids,
-            sequence_labels,
-            token_labels,
-            choice_labels,
-            encoder_hidden_states,
-            encoder_attention_mask,
-        )
-
     def create_and_check_codegen_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
         model = CodeGenModel(config=config)
         model.to(torch_device)

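`floats_tensor` was imported only for the deleted helper, hence the trimmed import. For reference, rough stand-ins for the two test utilities involved; the real ones live in `tests/test_modeling_common.py` with extra parameters, so these simplified sketches are illustrative, not the library code:

import torch

def floats_tensor(shape, scale=1.0):
    # Random float tensor, e.g. fake encoder hidden states.
    return torch.rand(*shape) * scale

def ids_tensor(shape, vocab_size):
    # Random integer ids in [0, vocab_size), e.g. token ids or a 0/1 mask.
    return torch.randint(0, vocab_size, tuple(shape))

The same import trim and helper removal repeat in the gpt_bigcode, gpt_neo, and gptj test files below.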
tests/models/falcon_mamba/test_modeling_falcon_mamba.py

Lines changed: 0 additions & 19 deletions
@@ -150,25 +150,6 @@ def get_pipeline_config(self):
         config.vocab_size = 300
         return config
 
-    def prepare_config_and_inputs_for_decoder(self):
-        (
-            config,
-            input_ids,
-            attention_mask,
-            sequence_labels,
-            token_labels,
-            choice_labels,
-        ) = self.prepare_config_and_inputs()
-
-        return (
-            config,
-            input_ids,
-            attention_mask,
-            sequence_labels,
-            token_labels,
-            choice_labels,
-        )
-
     def create_and_check_falcon_mamba_model(self, config, input_ids, *args):
         config.output_hidden_states = True
         model = FalconMambaModel(config=config)

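Unlike the GPT-family variants, FalconMamba's removed method added nothing on top of `prepare_config_and_inputs`: it unpacked the six-tuple and returned it unchanged. Illustratively, the deleted code was equivalent to:

def prepare_config_and_inputs_for_decoder(self):
    # Pure pass-through: same six-tuple in, same six-tuple out.
    return self.prepare_config_and_inputs()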
tests/models/gpt_bigcode/test_modeling_gpt_bigcode.py

Lines changed: 1 addition & 30 deletions
@@ -22,7 +22,7 @@
 
 from ...generation.test_utils import GenerationTesterMixin
 from ...test_configuration_common import ConfigTester
-from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
+from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
 from ...test_pipeline_mixin import PipelineTesterMixin
 
 
@@ -178,35 +178,6 @@ def get_pipeline_config(self):
         config.vocab_size = 300
         return config
 
-    def prepare_config_and_inputs_for_decoder(self):
-        (
-            config,
-            input_ids,
-            input_mask,
-            head_mask,
-            token_type_ids,
-            mc_token_ids,
-            sequence_labels,
-            token_labels,
-            choice_labels,
-        ) = self.prepare_config_and_inputs()
-
-        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
-        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
-
-        return (
-            config,
-            input_ids,
-            input_mask,
-            head_mask,
-            token_type_ids,
-            sequence_labels,
-            token_labels,
-            choice_labels,
-            encoder_hidden_states,
-            encoder_attention_mask,
-        )
-
     def create_and_check_gpt_bigcode_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
         model = GPTBigCodeModel(config=config)
         model.to(torch_device)

tests/models/gpt_neo/test_modeling_gpt_neo.py

Lines changed: 1 addition & 30 deletions
@@ -22,7 +22,7 @@
 
 from ...generation.test_utils import GenerationTesterMixin
 from ...test_configuration_common import ConfigTester
-from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
+from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
 from ...test_pipeline_mixin import PipelineTesterMixin
 
 
@@ -157,35 +157,6 @@ def get_pipeline_config(self):
         config.vocab_size = 300
         return config
 
-    def prepare_config_and_inputs_for_decoder(self):
-        (
-            config,
-            input_ids,
-            input_mask,
-            head_mask,
-            token_type_ids,
-            mc_token_ids,
-            sequence_labels,
-            token_labels,
-            choice_labels,
-        ) = self.prepare_config_and_inputs()
-
-        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
-        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
-
-        return (
-            config,
-            input_ids,
-            input_mask,
-            head_mask,
-            token_type_ids,
-            sequence_labels,
-            token_labels,
-            choice_labels,
-            encoder_hidden_states,
-            encoder_attention_mask,
-        )
-
     def create_and_check_gpt_neo_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
         model = GPTNeoModel(config=config)
         model.to(torch_device)

tests/models/gptj/test_modeling_gptj.py

Lines changed: 1 addition & 30 deletions
@@ -32,7 +32,7 @@
 
 from ...generation.test_utils import GenerationTesterMixin
 from ...test_configuration_common import ConfigTester
-from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
+from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
 from ...test_pipeline_mixin import PipelineTesterMixin
 
 
@@ -173,35 +173,6 @@ def get_pipeline_config(self):
         config.vocab_size = 300
         return config
 
-    def prepare_config_and_inputs_for_decoder(self):
-        (
-            config,
-            input_ids,
-            input_mask,
-            head_mask,
-            token_type_ids,
-            mc_token_ids,
-            sequence_labels,
-            token_labels,
-            choice_labels,
-        ) = self.prepare_config_and_inputs()
-
-        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
-        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
-
-        return (
-            config,
-            input_ids,
-            input_mask,
-            head_mask,
-            token_type_ids,
-            sequence_labels,
-            token_labels,
-            choice_labels,
-            encoder_hidden_states,
-            encoder_attention_mask,
-        )
-
     def create_and_check_gptj_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
         model = GPTJModel(config=config)
         model.to(torch_device)

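Across the codegen, gpt_bigcode, gpt_neo, and gptj testers, the deleted `prepare_config_and_inputs_for_decoder` built the same two cross-attention inputs and dropped `mc_token_ids` from the returned tuple. A standalone sketch of just those two constructed inputs, with placeholder dimensions (the real values come from each model tester):

import torch

batch_size, seq_length, hidden_size = 2, 7, 32  # placeholder test dimensions
# floats_tensor(...) in the deleted code: random fake encoder states.
encoder_hidden_states = torch.rand(batch_size, seq_length, hidden_size)
# ids_tensor(..., vocab_size=2) in the deleted code: a random 0/1 attention mask.
encoder_attention_mask = torch.randint(0, 2, (batch_size, seq_length))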