Merged
Changes from 6 commits
4 changes: 2 additions & 2 deletions src/transformers/models/aimv2/modeling_aimv2.py
@@ -444,7 +444,7 @@ def get_input_embeddings(self) -> nn.Module:
         return self.embeddings.patch_embed

     @deprecate_kwarg("attention_mask", version="v4.58.0")
-    @check_model_inputs
+    @check_model_inputs(post_ln_hiddens=False)
     @auto_docstring
     def forward(
         self,
@@ -520,7 +520,7 @@ def get_input_embeddings(self) -> nn.Module:
     def set_input_embeddings(self, value):
         self.embeddings.token_embedding = value

-    @check_model_inputs
+    @check_model_inputs(post_ln_hiddens=False)
     @auto_docstring
     def forward(
         self,
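The change repeated throughout this diff is uniform: the bare `@check_model_inputs` decorator becomes a decorator call, either `@check_model_inputs()` or, on vision encoders, `@check_model_inputs(post_ln_hiddens=False)`, apparently to control whether post-layer-norm hidden states are recorded. As a rough illustration of what that migration implies, here is a minimal, hypothetical sketch of a decorator usable both bare and with keyword arguments; it is not the actual `check_model_inputs` implementation in transformers, and the `post_ln_hiddens` handling shown is only an assumption.

from functools import wraps


def check_model_inputs(func=None, *, post_ln_hiddens: bool = True):
    # Hypothetical sketch only -- not the real transformers decorator.
    # Accepts both the old bare form (@check_model_inputs) and the new
    # called form (@check_model_inputs() / @check_model_inputs(post_ln_hiddens=False)).
    def decorator(forward):
        @wraps(forward)
        def wrapper(self, *args, **kwargs):
            # A real implementation would validate/record inputs here and could
            # use `post_ln_hiddens` to decide which hidden states to collect.
            return forward(self, *args, **kwargs)

        return wrapper

    if func is not None:
        # Bare usage: @check_model_inputs
        return decorator(func)
    # Called usage: @check_model_inputs(...) -- the form this PR standardizes on.
    return decorator

Read against the hunks above and below: text models take the empty call, while vision towers pass `post_ln_hiddens=False`.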
4 changes: 2 additions & 2 deletions src/transformers/models/aimv2/modular_aimv2.py
@@ -488,7 +488,7 @@ def get_input_embeddings(self) -> nn.Module:
         return self.embeddings.patch_embed

     @deprecate_kwarg("attention_mask", version="v4.58.0")
-    @check_model_inputs
+    @check_model_inputs(post_ln_hiddens=False)
     @auto_docstring
     def forward(
         self,
@@ -564,7 +564,7 @@ def get_input_embeddings(self) -> nn.Module:
     def set_input_embeddings(self, value):
         self.embeddings.token_embedding = value

-    @check_model_inputs
+    @check_model_inputs(post_ln_hiddens=False)
     @auto_docstring
     def forward(
         self,
2 changes: 1 addition & 1 deletion src/transformers/models/albert/modeling_albert.py
@@ -472,7 +472,7 @@ def _prune_heads(self, heads_to_prune: dict[int, list[int]]) -> None:
             inner_group_idx = int(layer - group_idx * self.config.inner_group_num)
             self.encoder.albert_layer_groups[group_idx].albert_layers[inner_group_idx].attention.prune_heads(heads)

-    @check_model_inputs
+    @check_model_inputs()
     @auto_docstring
     def forward(
         self,
2 changes: 1 addition & 1 deletion src/transformers/models/apertus/modeling_apertus.py
@@ -339,7 +339,7 @@ def __init__(self, config: ApertusConfig):
         # Initialize weights and apply final processing
         self.post_init()

-    @check_model_inputs
+    @check_model_inputs()
     @auto_docstring
     def forward(
         self,
2 changes: 1 addition & 1 deletion src/transformers/models/arcee/modeling_arcee.py
@@ -344,7 +344,7 @@ def __init__(self, config: ArceeConfig):
         # Initialize weights and apply final processing
         self.post_init()

-    @check_model_inputs
+    @check_model_inputs()
     @auto_docstring
     def forward(
         self,
2 changes: 1 addition & 1 deletion src/transformers/models/aria/modeling_aria.py
@@ -721,7 +721,7 @@ def __init__(self, config: AriaTextConfig):
         # Initialize weights and apply final processing
         self.post_init()

-    @check_model_inputs
+    @check_model_inputs()
     @auto_docstring
     def forward(
         self,
(file path not captured)
@@ -366,7 +366,7 @@ class PreTrainedModel
         for layer, heads in heads_to_prune.items():
             self.encoder.layer[layer].attention.prune_heads(heads)

-    @check_model_inputs
+    @check_model_inputs()
     @auto_docstring
     def forward(
         self,
2 changes: 1 addition & 1 deletion src/transformers/models/aya_vision/modeling_aya_vision.py
@@ -263,7 +263,7 @@ def get_placeholder_mask(
         )
         return special_image_mask

-    @check_model_inputs
+    @check_model_inputs()
     @auto_docstring
     def forward(
         self,
2 changes: 1 addition & 1 deletion src/transformers/models/aya_vision/modular_aya_vision.py
@@ -162,7 +162,7 @@ def get_image_features(
         image_features = self.multi_modal_projector(selected_image_feature)
         return image_features

-    @check_model_inputs
+    @check_model_inputs()
     @auto_docstring
     def forward(
         self,
2 changes: 1 addition & 1 deletion src/transformers/models/bert/modeling_bert.py
@@ -756,7 +756,7 @@ class PreTrainedModel
         for layer, heads in heads_to_prune.items():
             self.encoder.layer[layer].attention.prune_heads(heads)

-    @check_model_inputs
+    @check_model_inputs()
     @auto_docstring
     def forward(
         self,
(file path not captured)
@@ -617,7 +617,7 @@ class PreTrainedModel
         for layer, heads in heads_to_prune.items():
             self.encoder.layer[layer].attention.prune_heads(heads)

-    @check_model_inputs
+    @check_model_inputs()
     @auto_docstring
     def forward(
         self,
2 changes: 1 addition & 1 deletion src/transformers/models/bitnet/modeling_bitnet.py
@@ -343,7 +343,7 @@ def __init__(self, config: BitNetConfig):
         # Initialize weights and apply final processing
         self.post_init()

-    @check_model_inputs
+    @check_model_inputs()
     @auto_docstring
     def forward(
         self,
2 changes: 1 addition & 1 deletion src/transformers/models/blip/modeling_blip.py
@@ -508,7 +508,7 @@ def __init__(self, config: BlipVisionConfig):

         self.post_init()

-    @check_model_inputs
+    @check_model_inputs(post_ln_hiddens=False)
     @auto_docstring
     def forward(
         self,
4 changes: 2 additions & 2 deletions src/transformers/models/blip_2/modeling_blip_2.py
@@ -495,7 +495,7 @@ def __init__(self, config: Blip2VisionConfig):

         self.post_init()

-    @check_model_inputs
+    @check_model_inputs(post_ln_hiddens=False)
     @auto_docstring
     def forward(
         self,
@@ -1007,7 +1007,7 @@ def get_extended_attention_mask(
         extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
         return extended_attention_mask

-    @check_model_inputs
+    @check_model_inputs()
     @auto_docstring
     def forward(
         self,
4 changes: 2 additions & 2 deletions src/transformers/models/blt/modeling_blt.py
@@ -577,7 +577,7 @@ def __init__(self, config: BltLocalDecoderConfig):

         self.post_init()

-    @check_model_inputs
+    @check_model_inputs()
     def forward(
         self,
         input_ids: Optional[torch.LongTensor] = None,
@@ -1047,7 +1047,7 @@ def __init__(self, config: BltConfig):
         self.patcher = None
         self.post_init()

-    @check_model_inputs
+    @check_model_inputs()
     def forward(
         self,
         input_ids: Optional[torch.LongTensor] = None,
4 changes: 2 additions & 2 deletions src/transformers/models/blt/modular_blt.py
@@ -534,7 +534,7 @@ def __init__(self, config: BltLocalDecoderConfig):

         self.post_init()

-    @check_model_inputs
+    @check_model_inputs()
     def forward(
         self,
         input_ids: Optional[torch.LongTensor] = None,
@@ -797,7 +797,7 @@ def __init__(self, config: BltConfig):
         self.patcher = None
         self.post_init()

-    @check_model_inputs
+    @check_model_inputs()
     def forward(
         self,
         input_ids: Optional[torch.LongTensor] = None,
2 changes: 1 addition & 1 deletion src/transformers/models/camembert/modeling_camembert.py
@@ -736,7 +736,7 @@ class PreTrainedModel
         for layer, heads in heads_to_prune.items():
             self.encoder.layer[layer].attention.prune_heads(heads)

-    @check_model_inputs
+    @check_model_inputs()
     @auto_docstring
     def forward(
         self,
2 changes: 1 addition & 1 deletion src/transformers/models/cohere/modeling_cohere.py
@@ -376,7 +376,7 @@ def __init__(self, config: CohereConfig):
         # Initialize weights and apply final processing
         self.post_init()

-    @check_model_inputs
+    @check_model_inputs()
     @auto_docstring
     def forward(
         self,
2 changes: 1 addition & 1 deletion src/transformers/models/cohere2/modeling_cohere2.py
@@ -351,7 +351,7 @@ def __init__(self, config: Cohere2Config):
         # Initialize weights and apply final processing
         self.post_init()

-    @check_model_inputs
+    @check_model_inputs()
     @auto_docstring
     def forward(
         self,
(file path not captured)
@@ -213,7 +213,7 @@ def get_placeholder_mask(
         )
         return special_image_mask

-    @check_model_inputs
+    @check_model_inputs()
     @auto_docstring
     def forward(
         self,
@@ -306,7 +306,7 @@ def vision_tower(self):
     def multi_modal_projector(self):
         return self.model.multi_modal_projector

-    @check_model_inputs
+    @check_model_inputs()
     @auto_docstring
     def forward(
         self,
(file path not captured)
@@ -107,7 +107,7 @@ def get_image_features(self, pixel_values: torch.FloatTensor):
         image_features = self.multi_modal_projector(selected_image_feature)
         return image_features

-    @check_model_inputs
+    @check_model_inputs()
     @auto_docstring
     def forward(
         self,
@@ -160,7 +160,7 @@ class Cohere2VisionForConditionalGeneration(AyaVisionForConditionalGeneration):
     def get_image_features(self, pixel_values: torch.FloatTensor):
         return self.model.get_image_features(pixel_values=pixel_values)

-    @check_model_inputs
+    @check_model_inputs()
     @auto_docstring
     def forward(
         self,
4 changes: 2 additions & 2 deletions src/transformers/models/csm/modeling_csm.py
@@ -409,7 +409,7 @@ def __init__(self, config):
         # Initialize weights and apply final processing
         self.post_init()

-    @check_model_inputs
+    @check_model_inputs()
     @auto_docstring
     def forward(
         self,
@@ -662,7 +662,7 @@ def __init__(self, config):
         # Initialize weights and apply final processing
         self.post_init()

-    @check_model_inputs
+    @check_model_inputs()
     @auto_docstring
     def forward(
         self,
4 changes: 2 additions & 2 deletions src/transformers/models/csm/modular_csm.py
@@ -156,7 +156,7 @@ def __init__(self, config):
         self.embed_tokens = nn.Embedding((config.num_codebooks * config.vocab_size), config.backbone_hidden_size)
         self.inputs_embeds_projector = nn.Linear(config.backbone_hidden_size, config.hidden_size, bias=False)

-    @check_model_inputs
+    @check_model_inputs()
     @auto_docstring
     def forward(
         self,
@@ -395,7 +395,7 @@ def __init__(self, config):
         super().__init__(config)
         self.embed_tokens = CsmBackboneModelEmbeddings(config)

-    @check_model_inputs
+    @check_model_inputs()
     @auto_docstring
     def forward(self, **super_kwargs):
         r"""
2 changes: 1 addition & 1 deletion src/transformers/models/data2vec/modeling_data2vec_text.py
@@ -696,7 +696,7 @@ class PreTrainedModel
         for layer, heads in heads_to_prune.items():
             self.encoder.layer[layer].attention.prune_heads(heads)

-    @check_model_inputs
+    @check_model_inputs()
     @auto_docstring
     def forward(
         self,
(file path not captured)
@@ -491,7 +491,7 @@ def __init__(self, config: DeepseekV2Config):
         # Initialize weights and apply final processing
         self.post_init()

-    @check_model_inputs
+    @check_model_inputs()
     @auto_docstring
     def forward(
         self,
(file path not captured)
@@ -539,7 +539,7 @@ def __init__(self, config: DeepseekV3Config):
         # Initialize weights and apply final processing
         self.post_init()

-    @check_model_inputs
+    @check_model_inputs()
     @auto_docstring
     def forward(
         self,
2 changes: 1 addition & 1 deletion src/transformers/models/deit/modeling_deit.py
@@ -441,7 +441,7 @@ class PreTrainedModel
         for layer, heads in heads_to_prune.items():
             self.encoder.layer[layer].attention.prune_heads(heads)

-    @check_model_inputs
+    @check_model_inputs(post_ln_hiddens=False)
     @auto_docstring
     def forward(
         self,
2 changes: 1 addition & 1 deletion src/transformers/models/diffllama/modeling_diffllama.py
@@ -608,7 +608,7 @@ def __init__(self, config: DiffLlamaConfig):
         # Initialize weights and apply final processing
         self.post_init()

-    @check_model_inputs
+    @check_model_inputs()
     @auto_docstring
     def forward(
         self,
4 changes: 2 additions & 2 deletions src/transformers/models/dinov2/modeling_dinov2.py
@@ -496,7 +496,7 @@ class PreTrainedModel
         for layer, heads in heads_to_prune.items():
             self.encoder.layer[layer].attention.prune_heads(heads)

-    @check_model_inputs
+    @check_model_inputs(post_ln_hiddens=False)
     @auto_docstring
     def forward(
         self,
@@ -619,7 +619,7 @@ def __init__(self, config):
     def get_input_embeddings(self) -> Dinov2PatchEmbeddings:
         return self.embeddings.patch_embeddings

-    @check_model_inputs
+    @check_model_inputs()
     @auto_docstring
     def forward(
         self, pixel_values: torch.Tensor, output_hidden_states: Optional[bool] = None, **kwargs
(file path not captured)
@@ -513,7 +513,7 @@ class PreTrainedModel
         for layer, heads in heads_to_prune.items():
             self.encoder.layer[layer].attention.prune_heads(heads)

-    @check_model_inputs
+    @check_model_inputs(post_ln_hiddens=False)
     @auto_docstring
     def forward(
         self,
@@ -639,7 +639,7 @@ def __init__(self, config):
     def get_input_embeddings(self) -> Dinov2WithRegistersPatchEmbeddings:
         return self.embeddings.patch_embeddings

-    @check_model_inputs
+    @check_model_inputs()
     @auto_docstring
     def forward(
         self,
2 changes: 1 addition & 1 deletion src/transformers/models/dinov3_vit/modeling_dinov3_vit.py
@@ -494,7 +494,7 @@ def __init__(self, config: DINOv3ViTConfig):
     def get_input_embeddings(self):
         return self.embeddings.patch_embeddings

-    @check_model_inputs
+    @check_model_inputs(post_ln_hiddens=False)
     @auto_docstring
     def forward(
         self,
2 changes: 1 addition & 1 deletion src/transformers/models/dinov3_vit/modular_dinov3_vit.py
@@ -389,7 +389,7 @@ def __init__(self, config: DINOv3ViTConfig):
     def get_input_embeddings(self):
         return self.embeddings.patch_embeddings

-    @check_model_inputs
+    @check_model_inputs(post_ln_hiddens=False)
     @auto_docstring
     def forward(
         self,
2 changes: 1 addition & 1 deletion src/transformers/models/doge/modeling_doge.py
@@ -530,7 +530,7 @@ def __init__(self, config: DogeConfig):
         # Initialize weights and apply final processing
         self.post_init()

-    @check_model_inputs
+    @check_model_inputs()
     @auto_docstring
     def forward(
         self,