Skip to content

Commit 29b0770

Browse files
authored
Remove deprecated objects (#43170)
* remove stuff * oupsi * skip * fix
1 parent 942f110 commit 29b0770

23 files changed

+17
-1195
lines changed

docs/source/en/model_doc/led.md

Lines changed: 0 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -181,11 +181,6 @@ print(tokenizer.decode(output[0], skip_special_tokens=True))
181181
[[autodoc]] LEDForConditionalGeneration
182182
- forward
183183

184-
## LEDForSequenceClassification
185-
186-
[[autodoc]] LEDForSequenceClassification
187-
- forward
188-
189184
## LEDForQuestionAnswering
190185

191186
[[autodoc]] LEDForQuestionAnswering

src/transformers/__init__.py

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -133,7 +133,6 @@
133133
"is_wandb_available",
134134
],
135135
"loss": [],
136-
"modelcard": ["ModelCard"],
137136
"pipelines": [
138137
"AnyToAnyPipeline",
139138
"AudioClassificationPipeline",
@@ -615,8 +614,7 @@
615614
from .masking_utils import AttentionMaskInterface as AttentionMaskInterface
616615
from .model_debugging_utils import model_addition_debugger_context as model_addition_debugger_context
617616

618-
# Model Cards
619-
from .modelcard import ModelCard as ModelCard
617+
# Models
620618
from .modeling_layers import GradientCheckpointingLayer as GradientCheckpointingLayer
621619
from .modeling_rope_utils import ROPE_INIT_FUNCTIONS as ROPE_INIT_FUNCTIONS
622620
from .modeling_rope_utils import RopeParameters as RopeParameters

src/transformers/modelcard.py

Lines changed: 0 additions & 178 deletions
Original file line numberDiff line numberDiff line change
@@ -13,10 +13,7 @@
1313
# limitations under the License.
1414
"""Configuration base class and utilities."""
1515

16-
import copy
17-
import json
1816
import os
19-
import warnings
2017
from dataclasses import dataclass
2118
from pathlib import Path
2219
from typing import Any
@@ -47,8 +44,6 @@
4744
)
4845
from .training_args import ParallelMode
4946
from .utils import (
50-
MODEL_CARD_NAME,
51-
cached_file,
5247
is_datasets_available,
5348
is_tokenizers_available,
5449
is_torch_available,
@@ -76,179 +71,6 @@
7671
logger = logging.get_logger(__name__)
7772

7873

79-
class ModelCard:
80-
r"""
81-
Structured Model Card class. Store model card as well as methods for loading/downloading/saving model cards.
82-
83-
Please read the following paper for details and explanation on the sections: "Model Cards for Model Reporting" by
84-
Margaret Mitchell, Simone Wu, Andrew Zaldivar, Parker Barnes, Lucy Vasserman, Ben Hutchinson, Elena Spitzer,
85-
Inioluwa Deborah Raji and Timnit Gebru for the proposal behind model cards. Link: https://huggingface.co/papers/1810.03993
86-
87-
Note: A model card can be loaded and saved to disk.
88-
"""
89-
90-
def __init__(self, **kwargs):
91-
warnings.warn(
92-
"The class `ModelCard` is deprecated and will be removed in version 5 of Transformers", FutureWarning
93-
)
94-
# Recommended attributes from https://huggingface.co/papers/1810.03993 (see papers)
95-
self.model_details = kwargs.pop("model_details", {})
96-
self.intended_use = kwargs.pop("intended_use", {})
97-
self.factors = kwargs.pop("factors", {})
98-
self.metrics = kwargs.pop("metrics", {})
99-
self.evaluation_data = kwargs.pop("evaluation_data", {})
100-
self.training_data = kwargs.pop("training_data", {})
101-
self.quantitative_analyses = kwargs.pop("quantitative_analyses", {})
102-
self.ethical_considerations = kwargs.pop("ethical_considerations", {})
103-
self.caveats_and_recommendations = kwargs.pop("caveats_and_recommendations", {})
104-
105-
# Open additional attributes
106-
for key, value in kwargs.items():
107-
try:
108-
setattr(self, key, value)
109-
except AttributeError as err:
110-
logger.error(f"Can't set {key} with value {value} for {self}")
111-
raise err
112-
113-
def save_pretrained(self, save_directory_or_file):
114-
"""Save a model card object to the directory or file `save_directory_or_file`."""
115-
if os.path.isdir(save_directory_or_file):
116-
# If we save using the predefined names, we can load using `from_pretrained`
117-
output_model_card_file = os.path.join(save_directory_or_file, MODEL_CARD_NAME)
118-
else:
119-
output_model_card_file = save_directory_or_file
120-
121-
self.to_json_file(output_model_card_file)
122-
logger.info(f"Model card saved in {output_model_card_file}")
123-
124-
@classmethod
125-
def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
126-
r"""
127-
Instantiate a [`ModelCard`] from a pre-trained model model card.
128-
129-
Parameters:
130-
pretrained_model_name_or_path: either:
131-
132-
- a string, the *model id* of a pretrained model card hosted inside a model repo on huggingface.co.
133-
- a path to a *directory* containing a model card file saved using the [`~ModelCard.save_pretrained`]
134-
method, e.g.: `./my_model_directory/`.
135-
- a path or url to a saved model card JSON *file*, e.g.: `./my_model_directory/modelcard.json`.
136-
137-
cache_dir: (*optional*) string:
138-
Path to a directory in which a downloaded pre-trained model card should be cached if the standard cache
139-
should not be used.
140-
141-
kwargs: (*optional*) dict: key/value pairs with which to update the ModelCard object after loading.
142-
143-
- The values in kwargs of any keys which are model card attributes will be used to override the loaded
144-
values.
145-
- Behavior concerning key/value pairs whose keys are *not* model card attributes is controlled by the
146-
*return_unused_kwargs* keyword parameter.
147-
148-
proxies: (*optional*) dict, default None:
149-
A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128',
150-
'http://hostname': 'foo.bar:4012'}. The proxies are used on each request.
151-
152-
return_unused_kwargs: (*optional*) bool:
153-
154-
- If False, then this function returns just the final model card object.
155-
- If True, then this functions returns a tuple *(model card, unused_kwargs)* where *unused_kwargs* is a
156-
dictionary consisting of the key/value pairs whose keys are not model card attributes: ie the part of
157-
kwargs which has not been used to update *ModelCard* and is otherwise ignored.
158-
159-
Examples:
160-
161-
```python
162-
# Download model card from huggingface.co and cache.
163-
modelcard = ModelCard.from_pretrained("google-bert/bert-base-uncased")
164-
# Model card was saved using *save_pretrained('./test/saved_model/')*
165-
modelcard = ModelCard.from_pretrained("./test/saved_model/")
166-
modelcard = ModelCard.from_pretrained("./test/saved_model/modelcard.json")
167-
modelcard = ModelCard.from_pretrained("google-bert/bert-base-uncased", output_attentions=True, foo=False)
168-
```"""
169-
cache_dir = kwargs.pop("cache_dir", None)
170-
proxies = kwargs.pop("proxies", None)
171-
return_unused_kwargs = kwargs.pop("return_unused_kwargs", False)
172-
from_pipeline = kwargs.pop("_from_pipeline", None)
173-
174-
user_agent = {"file_type": "model_card"}
175-
if from_pipeline is not None:
176-
user_agent["using_pipeline"] = from_pipeline
177-
178-
is_local = os.path.isdir(pretrained_model_name_or_path)
179-
if os.path.isfile(pretrained_model_name_or_path):
180-
resolved_model_card_file = pretrained_model_name_or_path
181-
is_local = True
182-
else:
183-
try:
184-
# Load from URL or cache if already cached
185-
resolved_model_card_file = cached_file(
186-
pretrained_model_name_or_path,
187-
filename=MODEL_CARD_NAME,
188-
cache_dir=cache_dir,
189-
proxies=proxies,
190-
user_agent=user_agent,
191-
)
192-
if is_local:
193-
logger.info(f"loading model card file {resolved_model_card_file}")
194-
else:
195-
logger.info(f"loading model card file {MODEL_CARD_NAME} from cache at {resolved_model_card_file}")
196-
# Load model card
197-
modelcard = cls.from_json_file(resolved_model_card_file)
198-
199-
except (OSError, json.JSONDecodeError):
200-
# We fall back on creating an empty model card
201-
modelcard = cls()
202-
203-
# Update model card with kwargs if needed
204-
to_remove = []
205-
for key, value in kwargs.items():
206-
if hasattr(modelcard, key):
207-
setattr(modelcard, key, value)
208-
to_remove.append(key)
209-
for key in to_remove:
210-
kwargs.pop(key, None)
211-
212-
logger.info(f"Model card: {modelcard}")
213-
if return_unused_kwargs:
214-
return modelcard, kwargs
215-
else:
216-
return modelcard
217-
218-
@classmethod
219-
def from_dict(cls, json_object):
220-
"""Constructs a `ModelCard` from a Python dictionary of parameters."""
221-
return cls(**json_object)
222-
223-
@classmethod
224-
def from_json_file(cls, json_file):
225-
"""Constructs a `ModelCard` from a json file of parameters."""
226-
with open(json_file, encoding="utf-8") as reader:
227-
text = reader.read()
228-
dict_obj = json.loads(text)
229-
return cls(**dict_obj)
230-
231-
def __eq__(self, other):
232-
return self.__dict__ == other.__dict__
233-
234-
def __repr__(self):
235-
return str(self.to_json_string())
236-
237-
def to_dict(self):
238-
"""Serializes this instance to a Python dictionary."""
239-
output = copy.deepcopy(self.__dict__)
240-
return output
241-
242-
def to_json_string(self):
243-
"""Serializes this instance to a JSON string."""
244-
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
245-
246-
def to_json_file(self, json_file_path):
247-
"""Save this instance to a json file."""
248-
with open(json_file_path, "w", encoding="utf-8") as writer:
249-
writer.write(self.to_json_string())
250-
251-
25274
AUTOGENERATED_TRAINER_COMMENT = """
25375
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
25476
should probably proofread and complete it, then remove this comment. -->

src/transformers/modeling_outputs.py

Lines changed: 0 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,6 @@
1212
# See the License for the specific language governing permissions and
1313
# limitations under the License.
1414

15-
import warnings
1615
from dataclasses import dataclass
1716

1817
import torch
@@ -1705,12 +1704,3 @@ class MaskedImageModelingOutput(ModelOutput):
17051704
reconstruction: torch.FloatTensor | None = None
17061705
hidden_states: tuple[torch.FloatTensor, ...] | None = None
17071706
attentions: tuple[torch.FloatTensor, ...] | None = None
1708-
1709-
@property
1710-
def logits(self):
1711-
warnings.warn(
1712-
"logits attribute is deprecated and will be removed in version 5 of Transformers."
1713-
" Please use the reconstruction attribute to retrieve the final output instead.",
1714-
FutureWarning,
1715-
)
1716-
return self.reconstruction

src/transformers/models/auto/modeling_auto.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1280,7 +1280,6 @@ class _BaseModelWithGenerate(PreTrainedModel, GenerationMixin):
12801280
("layoutlm", "LayoutLMForSequenceClassification"),
12811281
("layoutlmv2", "LayoutLMv2ForSequenceClassification"),
12821282
("layoutlmv3", "LayoutLMv3ForSequenceClassification"),
1283-
("led", "LEDForSequenceClassification"),
12841283
("lilt", "LiltForSequenceClassification"),
12851284
("llama", "LlamaForSequenceClassification"),
12861285
("longformer", "LongformerForSequenceClassification"),

src/transformers/models/blip/modeling_blip.py

Lines changed: 0 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,6 @@
1313
# limitations under the License.
1414
"""PyTorch BLIP model."""
1515

16-
import warnings
1716
from dataclasses import dataclass
1817
from typing import Any
1918

@@ -84,15 +83,6 @@ class BlipForConditionalGenerationModelOutput(ModelOutput):
8483
hidden_states: tuple[torch.FloatTensor, ...] | None = None
8584
attentions: tuple[torch.FloatTensor, ...] | None = None
8685

87-
@property
88-
def decoder_logits(self):
89-
warnings.warn(
90-
"`decoder_logits` attribute is deprecated and will be removed in version 5 of Transformers."
91-
" Please use the `logits` attribute to retrieve the final output instead.",
92-
FutureWarning,
93-
)
94-
return self.logits
95-
9686

9787
@dataclass
9888
@auto_docstring(

0 commit comments

Comments (0)