Commit 6c0c004

Revert "Add Dia model (huggingface#38405)"
This reverts commit 583db52.

# Conflicts:
#	docs/source/en/model_doc/dia.md
#	src/transformers/configuration_utils.py
#	src/transformers/models/auto/processing_auto.py
#	src/transformers/models/auto/tokenization_auto.py
#	src/transformers/models/dac/modeling_dac.py
#	src/transformers/models/dia/generation_dia.py
#	src/transformers/models/dia/modeling_dia.py
#	src/transformers/models/dia/modular_dia.py
#	tests/models/dia/test_feature_extraction_dia.py
#	tests/models/dia/test_modeling_dia.py
#	tests/models/dia/test_processing_dia.py
#	utils/check_config_attributes.py
1 parent 06f8004 commit 6c0c004

33 files changed: +86, -5,518 lines

docs/source/en/_toctree.yml

Lines changed: 0 additions & 2 deletions
@@ -879,8 +879,6 @@
     title: CSM
   - local: model_doc/dac
     title: dac
-  - local: model_doc/dia
-    title: Dia
   - local: model_doc/encodec
     title: EnCodec
   - local: model_doc/fastspeech2_conformer

docs/source/en/model_doc/auto.md

Lines changed: 0 additions & 4 deletions
@@ -354,10 +354,6 @@ The following auto classes are available for the following audio tasks.
 
 [[autodoc]] AutoModelForTextToWaveform
 
-### AutoModelForAudioTokenization
-
-[[autodoc]] AutoModelForAudioTokenization
-
 ## Multimodal
 
 The following auto classes are available for the following multimodal tasks.
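For reference, the audio auto classes documented in this file are used through the usual `from_pretrained` interface; the deleted `AutoModelForAudioTokenization` section covered the class removed by this revert. A minimal sketch of the surviving `AutoModelForTextToWaveform` entry, where the checkpoint name is purely illustrative and not taken from this commit:

from transformers import AutoModelForTextToWaveform, AutoProcessor

# The auto class resolves the concrete text-to-waveform model from the
# checkpoint's config; Bark is used here only as an illustrative example.
model_id = "suno/bark-small"  # illustrative checkpoint, not part of this commit
processor = AutoProcessor.from_pretrained(model_id)
model = AutoModelForTextToWaveform.from_pretrained(model_id)

inputs = processor(text="Hello from the audio auto classes.", return_tensors="pt")
speech = model.generate(**inputs)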

docs/source/en/model_doc/dia.md

Lines changed: 0 additions & 162 deletions
This file was deleted.

src/transformers/configuration_utils.py

Lines changed: 58 additions & 0 deletions
@@ -319,6 +319,64 @@ def __init__(
         for parameter_name, default_value in self._get_global_generation_defaults().items():
             setattr(self, parameter_name, kwargs.pop(parameter_name, default_value))
 
+        # Fine-tuning task arguments
+        self.architectures = kwargs.pop("architectures", None)
+        self.finetuning_task = kwargs.pop("finetuning_task", None)
+        self.id2label = kwargs.pop("id2label", None)
+        self.label2id = kwargs.pop("label2id", None)
+        if self.label2id is not None and not isinstance(self.label2id, dict):
+            raise ValueError("Argument label2id should be a dictionary.")
+        if self.id2label is not None:
+            if not isinstance(self.id2label, dict):
+                raise ValueError("Argument id2label should be a dictionary.")
+            num_labels = kwargs.pop("num_labels", None)
+            if num_labels is not None and len(self.id2label) != num_labels:
+                logger.warning(
+                    f"You passed along `num_labels={num_labels}` with an incompatible id to label map: "
+                    f"{self.id2label}. The number of labels will be overwritten to {self.num_labels}."
+                )
+            self.id2label = {int(key): value for key, value in self.id2label.items()}
+            # Keys are always strings in JSON so convert ids to int here.
+        else:
+            self.num_labels = kwargs.pop("num_labels", 2)
+
+        if self.torch_dtype is not None and isinstance(self.torch_dtype, str):
+            # we will start using self.torch_dtype in v5, but to be consistent with
+            # from_pretrained's torch_dtype arg convert it to an actual torch.dtype object
+            if is_torch_available():
+                import torch
+
+                self.torch_dtype = getattr(torch, self.torch_dtype)
+
+        # Tokenizer arguments TODO: eventually tokenizer and models should share the same config
+        self.tokenizer_class = kwargs.pop("tokenizer_class", None)
+        self.prefix = kwargs.pop("prefix", None)
+        self.bos_token_id = kwargs.pop("bos_token_id", None)
+        self.pad_token_id = kwargs.pop("pad_token_id", None)
+        self.eos_token_id = kwargs.pop("eos_token_id", None)
+        self.sep_token_id = kwargs.pop("sep_token_id", None)
+
+        self.decoder_start_token_id = kwargs.pop("decoder_start_token_id", None)
+
+        # task specific arguments
+        self.task_specific_params = kwargs.pop("task_specific_params", None)
+
+        # regression / multi-label classification
+        self.problem_type = kwargs.pop("problem_type", None)
+        allowed_problem_types = ("regression", "single_label_classification", "multi_label_classification")
+        if self.problem_type is not None and self.problem_type not in allowed_problem_types:
+            raise ValueError(
+                f"The config parameter `problem_type` was not understood: received {self.problem_type} "
+                "but only 'regression', 'single_label_classification' and 'multi_label_classification' are valid."
+            )
+
+        # TPU arguments
+        if kwargs.pop("xla_device", None) is not None:
+            logger.warning(
+                "The `xla_device` argument has been deprecated in v4.4.0 of Transformers. It is ignored and you can "
+                "safely remove it from your `config.json` file."
+            )
+
         # Name or path to the pretrained checkpoint
         self._name_or_path = str(kwargs.pop("name_or_path", ""))
         self._commit_hash = kwargs.pop("_commit_hash", None)