From 56d016ba38825c789515615d189bc4669bb9c913 Mon Sep 17 00:00:00 2001 From: nouamanetazi Date: Sat, 22 Mar 2025 10:59:37 +0000 Subject: [PATCH 01/13] . --- src/lighteval/config/lighteval_config.py | 8 +++ src/lighteval/main_nanotron.py | 51 ++++++++++++------- src/lighteval/models/__init__.py | 21 ++++++++ src/lighteval/models/nanotron/__init__.py | 21 ++++++++ .../models/nanotron/nanotron_model.py | 20 +++++--- src/lighteval/models/nanotron_model.py | 26 ++++++++++ src/lighteval/pipeline.py | 7 ++- 7 files changed, 128 insertions(+), 26 deletions(-) create mode 100644 src/lighteval/models/__init__.py create mode 100644 src/lighteval/models/nanotron/__init__.py create mode 100644 src/lighteval/models/nanotron_model.py diff --git a/src/lighteval/config/lighteval_config.py b/src/lighteval/config/lighteval_config.py index f24a15184..0e8217afe 100644 --- a/src/lighteval/config/lighteval_config.py +++ b/src/lighteval/config/lighteval_config.py @@ -101,3 +101,11 @@ class LightEvalConfig: class FullNanotronConfig: lighteval_config: LightEvalConfig nanotron_config: "Config" + + @property + def generation_parameters(self): + # Return the generation parameters from the lighteval config + # or create default generation parameters if none are set + if self.lighteval_config.generation: + return self.lighteval_config.generation + return GenerationArgs() diff --git a/src/lighteval/main_nanotron.py b/src/lighteval/main_nanotron.py index 94004c065..1b973a112 100644 --- a/src/lighteval/main_nanotron.py +++ b/src/lighteval/main_nanotron.py @@ -42,17 +42,17 @@ def nanotron( checkpoint_config_path: Annotated[ str, Option(help="Path to the nanotron checkpoint YAML or python config file, potentially on s3.") ], - lighteval_config_path: Annotated[str, Option(help="Path to a YAML config to be used for the evaluation.")], + lighteval_config_path: Annotated[str, Option(help="Path to a YAML config to be used for the evaluation.")] = None, cache_dir: Annotated[str, Option(help="Cache directory for datasets and models.")] = CACHE_DIR, ): """ Evaluate models using nanotron as backend. 
""" from nanotron.config import Config, get_config_from_file + from nanotron.config.parallelism_config import ParallelismArgs - from lighteval.config.lighteval_config import FullNanotronConfig, LightEvalConfig + from lighteval.config.lighteval_config import FullNanotronConfig, LightEvalConfig, LightEvalLoggingArgs, LightEvalTasksArgs from lighteval.logging.evaluation_tracker import EvaluationTracker - from lighteval.logging.hierarchical_logger import htrack_block from lighteval.pipeline import ParallelismManager, Pipeline, PipelineParameters from lighteval.utils.imports import NO_NANOTRON_ERROR_MSG, is_nanotron_available from lighteval.utils.utils import EnvConfig @@ -61,23 +61,38 @@ def nanotron( if not is_nanotron_available(): raise ImportError(NO_NANOTRON_ERROR_MSG) + + # Create nanotron config + if not checkpoint_config_path.endswith(".yaml"): + raise ValueError("The checkpoint path should point to a YAML file") + + model_config = get_config_from_file( + checkpoint_config_path, + config_class=Config, + model_config_class=None, + skip_unused_config_keys=True, + skip_null_keys=True, + ) - with htrack_block("Load nanotron config"): - # Create nanotron config - if not checkpoint_config_path.endswith(".yaml"): - raise ValueError("The checkpoint path should point to a YAML file") - - model_config = get_config_from_file( - checkpoint_config_path, - config_class=Config, - model_config_class=None, - skip_unused_config_keys=True, - skip_null_keys=True, - ) - - # We are getting an type error, because the get_config_from_file is not correctly typed, + # Create or use default lighteval config + if lighteval_config_path is not None: lighteval_config: LightEvalConfig = get_config_from_file(lighteval_config_path, config_class=LightEvalConfig) # type: ignore - nanotron_config = FullNanotronConfig(lighteval_config, model_config) + else: + # Create default config with minimal required parameters + default_logging = LightEvalLoggingArgs( + output_dir="./eval_results" + ) + default_tasks = LightEvalTasksArgs( + tasks="lighteval|agieval:aqua-rat|5|0" + ) + default_parallelism = ParallelismArgs(dp=1, pp=1, tp=1) + lighteval_config = LightEvalConfig( + logging=default_logging, + tasks=default_tasks, + parallelism=default_parallelism + ) + + nanotron_config = FullNanotronConfig(lighteval_config, model_config) evaluation_tracker = EvaluationTracker( output_dir=lighteval_config.logging.output_dir, diff --git a/src/lighteval/models/__init__.py b/src/lighteval/models/__init__.py new file mode 100644 index 000000000..064e2842d --- /dev/null +++ b/src/lighteval/models/__init__.py @@ -0,0 +1,21 @@ +# MIT License + +# Copyright (c) 2024 The HuggingFace Team + +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. \ No newline at end of file diff --git a/src/lighteval/models/nanotron/__init__.py b/src/lighteval/models/nanotron/__init__.py new file mode 100644 index 000000000..064e2842d --- /dev/null +++ b/src/lighteval/models/nanotron/__init__.py @@ -0,0 +1,21 @@ +# MIT License + +# Copyright (c) 2024 The HuggingFace Team + +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. \ No newline at end of file diff --git a/src/lighteval/models/nanotron/nanotron_model.py b/src/lighteval/models/nanotron/nanotron_model.py index 5f139174c..17b027438 100644 --- a/src/lighteval/models/nanotron/nanotron_model.py +++ b/src/lighteval/models/nanotron/nanotron_model.py @@ -343,7 +343,14 @@ def tok_decode(self, tokens: torch.LongTensor) -> List[str]: return self.tokenizer.batch_decode(tokens, skip_special_tokens=True) def _model_call(self, inputs: torch.Tensor) -> torch.Tensor: - return self.model(inputs) + position_ids = ( + torch.arange( + inputs.shape[1], device=inputs.device, dtype=torch.int32 + ) + .unsqueeze(0) + .repeat(inputs.shape[0], 1) + ) + return self.model(inputs, position_ids) def homogeneize_ending_conditions(self, ending_condition: tuple | dict | list | str) -> tuple[list, int]: """Ending conditions are submitted in several possible formats. 
@@ -711,14 +718,14 @@ def _loglikelihood_single_token( inputs, padding_length=max_context, max_context=max_context, full_attention_masks=True ) # batched_inputs, batch_attention, input_lengths, truncated, padded - - out = self.model(input_ids=batch_model.input_ids, input_mask=batch_model.input_mask) + position_ids = torch.arange(batch_model.input_ids.shape[1], device=self.device, dtype=torch.int32).unsqueeze(0).repeat(batch_model.input_ids.shape[0], 1) + out = self.model(input_ids=batch_model.input_ids, position_ids=position_ids) if dist.get_rank(self.parallel_context.pp_pg) == self.output_pp_rank: # This process got outputs # Gather all the output accross TP - out = out.transpose(0, 1).contiguous() # [batch, seq_length, vocab] + out = out.view(*batch_model.input_ids.shape, -1).contiguous() # [batch, seq_length, vocab] gathered_out = [torch.zeros_like(out) for _ in range(self.parallel_context.tp_pg.size())] dist.all_gather(gathered_out, out, group=self.parallel_context.tp_pg, async_op=False) @@ -944,7 +951,8 @@ def _loglikelihood_tokens( ) # batched_inputs, batch_attention, input_lengths, truncated, padded with torch.no_grad(): - out = self.model(input_ids=batch_model.input_ids, input_mask=batch_model.input_mask) + position_ids = torch.arange(batch_model.input_ids.shape[1], device=self.device, dtype=torch.int32).unsqueeze(0).repeat(batch_model.input_ids.shape[0], 1) + out = self.model(input_ids=batch_model.input_ids, position_ids=position_ids) if dist.get_rank(self.parallel_context.pp_pg) == self.output_pp_rank: # This process got outputs @@ -954,7 +962,7 @@ def _loglikelihood_tokens( dist.all_gather(gathered_out, out, group=self.parallel_context.tp_pg, async_op=False) out = torch.cat(gathered_out, dim=-1) - out = out.transpose(0, 1) # [batch, seq_length, vocab] + out = out.view(*batch_model.input_ids.shape, -1) # [batch, seq_length, vocab] multi_logits = F.log_softmax(out, dim=-1) # [batch, padding_length, vocab] logits_sum = [] diff --git a/src/lighteval/models/nanotron_model.py b/src/lighteval/models/nanotron_model.py new file mode 100644 index 000000000..4a1ed72c6 --- /dev/null +++ b/src/lighteval/models/nanotron_model.py @@ -0,0 +1,26 @@ +# MIT License + +# Copyright (c) 2024 The HuggingFace Team + +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
+ +# Import and re-export the NanotronLightevalModel class from the nanotron module +from lighteval.models.nanotron.nanotron_model import NanotronLightevalModel + +__all__ = ["NanotronLightevalModel"] \ No newline at end of file diff --git a/src/lighteval/pipeline.py b/src/lighteval/pipeline.py index b83403755..fbb97da7f 100644 --- a/src/lighteval/pipeline.py +++ b/src/lighteval/pipeline.py @@ -72,7 +72,7 @@ from nanotron.parallel.context import ParallelContext from nanotron.utils import local_ranks_zero_first - from lighteval.models.nanotron_model import NanotronLightevalModel + # from lighteval.models.nanotron import NanotronLightevalModel import logging @@ -188,16 +188,19 @@ def _init_model(self, model_config, model): logger.info("--- LOADING MODEL ---") if model_config is not None: if self.parallel_context: + from lighteval.models.nanotron_model import NanotronLightevalModel + return NanotronLightevalModel( checkpoint_path=os.path.dirname(self.pipeline_parameters.nanotron_checkpoint_path) if self.pipeline_parameters.nanotron_checkpoint_path else "", - nanotron_config=self.model_config, + nanotron_config=model_config, parallel_context=self.parallel_context, debug_one_layer_model=False, model_class=None, env_config=self.pipeline_parameters.env_config, ) + # return None else: return load_model(config=model_config, env_config=self.pipeline_parameters.env_config) if isinstance(model, TransformersModel): From 0bb5d615276143becb6838ead47fd63d593cff6f Mon Sep 17 00:00:00 2001 From: nouamanetazi Date: Sat, 22 Mar 2025 11:31:06 +0000 Subject: [PATCH 02/13] . --- src/lighteval/main_nanotron.py | 21 +++------------------ 1 file changed, 3 insertions(+), 18 deletions(-) diff --git a/src/lighteval/main_nanotron.py b/src/lighteval/main_nanotron.py index 1b973a112..22755997a 100644 --- a/src/lighteval/main_nanotron.py +++ b/src/lighteval/main_nanotron.py @@ -42,7 +42,7 @@ def nanotron( checkpoint_config_path: Annotated[ str, Option(help="Path to the nanotron checkpoint YAML or python config file, potentially on s3.") ], - lighteval_config_path: Annotated[str, Option(help="Path to a YAML config to be used for the evaluation.")] = None, + lighteval_config_path: Annotated[str, Option(help="Path to a YAML config to be used for the evaluation.")], cache_dir: Annotated[str, Option(help="Cache directory for datasets and models.")] = CACHE_DIR, ): """ @@ -74,23 +74,8 @@ def nanotron( skip_null_keys=True, ) - # Create or use default lighteval config - if lighteval_config_path is not None: - lighteval_config: LightEvalConfig = get_config_from_file(lighteval_config_path, config_class=LightEvalConfig) # type: ignore - else: - # Create default config with minimal required parameters - default_logging = LightEvalLoggingArgs( - output_dir="./eval_results" - ) - default_tasks = LightEvalTasksArgs( - tasks="lighteval|agieval:aqua-rat|5|0" - ) - default_parallelism = ParallelismArgs(dp=1, pp=1, tp=1) - lighteval_config = LightEvalConfig( - logging=default_logging, - tasks=default_tasks, - parallelism=default_parallelism - ) + # Load lighteval config + lighteval_config: LightEvalConfig = get_config_from_file(lighteval_config_path, config_class=LightEvalConfig) # type: ignore nanotron_config = FullNanotronConfig(lighteval_config, model_config) From 404c4890697e9c488cf9e057fe3015ad927e6917 Mon Sep 17 00:00:00 2001 From: nouamanetazi Date: Sat, 22 Mar 2025 11:37:11 +0000 Subject: [PATCH 03/13] . 
--- src/lighteval/models/__init__.py | 21 ------------------ src/lighteval/models/nanotron/__init__.py | 21 ------------------ src/lighteval/models/nanotron_model.py | 26 ----------------------- src/lighteval/pipeline.py | 7 ++---- 4 files changed, 2 insertions(+), 73 deletions(-) delete mode 100644 src/lighteval/models/__init__.py delete mode 100644 src/lighteval/models/nanotron/__init__.py delete mode 100644 src/lighteval/models/nanotron_model.py diff --git a/src/lighteval/models/__init__.py b/src/lighteval/models/__init__.py deleted file mode 100644 index 064e2842d..000000000 --- a/src/lighteval/models/__init__.py +++ /dev/null @@ -1,21 +0,0 @@ -# MIT License - -# Copyright (c) 2024 The HuggingFace Team - -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all -# copies or substantial portions of the Software. - -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. \ No newline at end of file diff --git a/src/lighteval/models/nanotron/__init__.py b/src/lighteval/models/nanotron/__init__.py deleted file mode 100644 index 064e2842d..000000000 --- a/src/lighteval/models/nanotron/__init__.py +++ /dev/null @@ -1,21 +0,0 @@ -# MIT License - -# Copyright (c) 2024 The HuggingFace Team - -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all -# copies or substantial portions of the Software. - -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. 
\ No newline at end of file diff --git a/src/lighteval/models/nanotron_model.py b/src/lighteval/models/nanotron_model.py deleted file mode 100644 index 4a1ed72c6..000000000 --- a/src/lighteval/models/nanotron_model.py +++ /dev/null @@ -1,26 +0,0 @@ -# MIT License - -# Copyright (c) 2024 The HuggingFace Team - -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all -# copies or substantial portions of the Software. - -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. - -# Import and re-export the NanotronLightevalModel class from the nanotron module -from lighteval.models.nanotron.nanotron_model import NanotronLightevalModel - -__all__ = ["NanotronLightevalModel"] \ No newline at end of file diff --git a/src/lighteval/pipeline.py b/src/lighteval/pipeline.py index fbb97da7f..dec9c3cd0 100644 --- a/src/lighteval/pipeline.py +++ b/src/lighteval/pipeline.py @@ -72,7 +72,7 @@ from nanotron.parallel.context import ParallelContext from nanotron.utils import local_ranks_zero_first - # from lighteval.models.nanotron import NanotronLightevalModel + from lighteval.models.nanotron.nanotron_model import NanotronLightevalModel import logging @@ -187,9 +187,7 @@ def _init_parallelism_manager(self): def _init_model(self, model_config, model): logger.info("--- LOADING MODEL ---") if model_config is not None: - if self.parallel_context: - from lighteval.models.nanotron_model import NanotronLightevalModel - + if self.parallel_context: return NanotronLightevalModel( checkpoint_path=os.path.dirname(self.pipeline_parameters.nanotron_checkpoint_path) if self.pipeline_parameters.nanotron_checkpoint_path @@ -200,7 +198,6 @@ def _init_model(self, model_config, model): model_class=None, env_config=self.pipeline_parameters.env_config, ) - # return None else: return load_model(config=model_config, env_config=self.pipeline_parameters.env_config) if isinstance(model, TransformersModel): From 03c1195780e59003c00c0f1b934179bd0fd583bc Mon Sep 17 00:00:00 2001 From: Jason Stillerman Date: Wed, 26 Mar 2025 00:53:13 +0000 Subject: [PATCH 04/13] allow extra keywords in LightevalTaskConfig --- src/lighteval/tasks/lighteval_task.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/lighteval/tasks/lighteval_task.py b/src/lighteval/tasks/lighteval_task.py index 3420480b2..e7761a206 100644 --- a/src/lighteval/tasks/lighteval_task.py +++ b/src/lighteval/tasks/lighteval_task.py @@ -107,6 +107,7 @@ class LightevalTaskConfig: few_shots_select: Optional[str] = None # Generation args + output_regex: Optional[str] = None generation_size: Optional[int] = None generation_grammar: Optional[TextGenerationInputGrammarType] = None stop_sequence: 
Optional[ListLike[str]] = None @@ -120,6 +121,7 @@ class LightevalTaskConfig: must_remove_duplicate_docs: bool = False version: int = 0 + frozen: bool = False def __post_init__(self): # If we got a Metrics enums instead of a Metric, we convert From aca6efac89661e2d1cddc686cc208e5809f344d0 Mon Sep 17 00:00:00 2001 From: anton Date: Mon, 31 Mar 2025 11:31:50 +0200 Subject: [PATCH 05/13] nanotron updates --- src/lighteval/data.py | 3 +-- src/lighteval/main_nanotron.py | 32 +++++++++++++++----------------- src/lighteval/pipeline.py | 8 ++++++-- 3 files changed, 22 insertions(+), 21 deletions(-) diff --git a/src/lighteval/data.py b/src/lighteval/data.py index dd1fd5341..65506c869 100644 --- a/src/lighteval/data.py +++ b/src/lighteval/data.py @@ -110,13 +110,12 @@ def get_original_order(self, new_arr: list) -> list: list: new_arr in the original order. """ original_order = [None] * self.total_size - for original_index, v in zip(self.original_order, new_arr): original_order[original_index] = v if None in original_order: raise RuntimeError( - f"Some elements of the original order are None, meaning that len(new_arr) ({len(new_arr)}) != len(original_array) ({self.total_size})" + f"Some elements of the original order are None, meaning that len(new_arr) ({len(new_arr)}) != len(original_array) ({self.total_size}) original_order {len(self.original_order)} {len(new_arr)} {self.total_size} {len(self.sorted_data)}" ) return original_order diff --git a/src/lighteval/main_nanotron.py b/src/lighteval/main_nanotron.py index 94004c065..0e200f141 100644 --- a/src/lighteval/main_nanotron.py +++ b/src/lighteval/main_nanotron.py @@ -52,7 +52,6 @@ def nanotron( from lighteval.config.lighteval_config import FullNanotronConfig, LightEvalConfig from lighteval.logging.evaluation_tracker import EvaluationTracker - from lighteval.logging.hierarchical_logger import htrack_block from lighteval.pipeline import ParallelismManager, Pipeline, PipelineParameters from lighteval.utils.imports import NO_NANOTRON_ERROR_MSG, is_nanotron_available from lighteval.utils.utils import EnvConfig @@ -62,22 +61,21 @@ def nanotron( if not is_nanotron_available(): raise ImportError(NO_NANOTRON_ERROR_MSG) - with htrack_block("Load nanotron config"): - # Create nanotron config - if not checkpoint_config_path.endswith(".yaml"): - raise ValueError("The checkpoint path should point to a YAML file") - - model_config = get_config_from_file( - checkpoint_config_path, - config_class=Config, - model_config_class=None, - skip_unused_config_keys=True, - skip_null_keys=True, - ) - - # We are getting an type error, because the get_config_from_file is not correctly typed, - lighteval_config: LightEvalConfig = get_config_from_file(lighteval_config_path, config_class=LightEvalConfig) # type: ignore - nanotron_config = FullNanotronConfig(lighteval_config, model_config) + # Create nanotron config + if not checkpoint_config_path.endswith(".yaml"): + raise ValueError("The checkpoint path should point to a YAML file") + + model_config = get_config_from_file( + checkpoint_config_path, + config_class=Config, + model_config_class=None, + skip_unused_config_keys=True, + skip_null_keys=True, + ) + + # We are getting a type error, because the get_config_from_file is not correctly typed, + lighteval_config: LightEvalConfig = get_config_from_file(lighteval_config_path, config_class=LightEvalConfig) # type: ignore + nanotron_config = FullNanotronConfig(lighteval_config, model_config) evaluation_tracker = EvaluationTracker( output_dir=lighteval_config.logging.output_dir, 
diff --git a/src/lighteval/pipeline.py b/src/lighteval/pipeline.py index b83403755..fd560bbe2 100644 --- a/src/lighteval/pipeline.py +++ b/src/lighteval/pipeline.py @@ -72,7 +72,7 @@ from nanotron.parallel.context import ParallelContext from nanotron.utils import local_ranks_zero_first - from lighteval.models.nanotron_model import NanotronLightevalModel + from lighteval.models.nanotron.nanotron_model import NanotronLightevalModel import logging @@ -156,7 +156,11 @@ def __init__( self.accelerator, self.parallel_context = self._init_parallelism_manager() self.model = self._init_model(model_config, model) - generation_parameters = asdict(model_config.generation_parameters) if model_config else {} + generation_parameters = ( + asdict(model_config.generation_parameters) + if model_config and hasattr(model_config, "generation_parameters") + else {} + ) self.evaluation_tracker.general_config_logger.log_model_info(generation_parameters, self.model.model_info) self._init_tasks_and_requests(tasks=tasks) From 41986bccfa2f4101f200d1a1826ae26bb1d2af2c Mon Sep 17 00:00:00 2001 From: anton Date: Wed, 2 Apr 2025 23:52:04 +0200 Subject: [PATCH 06/13] nanotron updates --- src/lighteval/config/lighteval_config.py | 8 ++++++ .../models/nanotron/nanotron_model.py | 25 +++++++++++++++---- src/lighteval/pipeline.py | 8 ++---- src/lighteval/tasks/lighteval_task.py | 2 ++ src/lighteval/tasks/multilingual/tasks.py | 6 ++--- src/lighteval/tasks/templates/multichoice.py | 2 +- 6 files changed, 36 insertions(+), 15 deletions(-) diff --git a/src/lighteval/config/lighteval_config.py b/src/lighteval/config/lighteval_config.py index f24a15184..100ab5431 100644 --- a/src/lighteval/config/lighteval_config.py +++ b/src/lighteval/config/lighteval_config.py @@ -101,3 +101,11 @@ class LightEvalConfig: class FullNanotronConfig: lighteval_config: LightEvalConfig nanotron_config: "Config" + + @property + def generation_parameters(self): + # Return the generation parameters from the lighteval config + # or create default generation parameters if none are set + if self.lighteval_config.generation: + return self.lighteval_config.generation + return GenerationArgs() diff --git a/src/lighteval/models/nanotron/nanotron_model.py b/src/lighteval/models/nanotron/nanotron_model.py index 5f139174c..724c66ec9 100644 --- a/src/lighteval/models/nanotron/nanotron_model.py +++ b/src/lighteval/models/nanotron/nanotron_model.py @@ -343,7 +343,12 @@ def tok_decode(self, tokens: torch.LongTensor) -> List[str]: return self.tokenizer.batch_decode(tokens, skip_special_tokens=True) def _model_call(self, inputs: torch.Tensor) -> torch.Tensor: - return self.model(inputs) + position_ids = ( + torch.arange(inputs.shape[1], device=inputs.device, dtype=torch.int32) + .unsqueeze(0) + .repeat(inputs.shape[0], 1) + ) + return self.model(inputs, position_ids) def homogeneize_ending_conditions(self, ending_condition: tuple | dict | list | str) -> tuple[list, int]: """Ending conditions are submitted in several possible formats. 
@@ -712,13 +717,18 @@ def _loglikelihood_single_token( ) # batched_inputs, batch_attention, input_lengths, truncated, padded - out = self.model(input_ids=batch_model.input_ids, input_mask=batch_model.input_mask) + position_ids = ( + torch.arange(batch_model.input_ids.shape[1], device=self.device, dtype=torch.int32) + .unsqueeze(0) + .repeat(batch_model.input_ids.shape[0], 1) + ) + out = self.model(input_ids=batch_model.input_ids, position_ids=position_ids) if dist.get_rank(self.parallel_context.pp_pg) == self.output_pp_rank: # This process got outputs # Gather all the output accross TP - out = out.transpose(0, 1).contiguous() # [batch, seq_length, vocab] + out = out.view(*batch_model.input_ids.shape, -1).contiguous() # [batch, seq_length, vocab] gathered_out = [torch.zeros_like(out) for _ in range(self.parallel_context.tp_pg.size())] dist.all_gather(gathered_out, out, group=self.parallel_context.tp_pg, async_op=False) @@ -944,7 +954,12 @@ def _loglikelihood_tokens( ) # batched_inputs, batch_attention, input_lengths, truncated, padded with torch.no_grad(): - out = self.model(input_ids=batch_model.input_ids, input_mask=batch_model.input_mask) + position_ids = ( + torch.arange(batch_model.input_ids.shape[1], device=self.device, dtype=torch.int32) + .unsqueeze(0) + .repeat(batch_model.input_ids.shape[0], 1) + ) + out = self.model(input_ids=batch_model.input_ids, position_ids=position_ids) if dist.get_rank(self.parallel_context.pp_pg) == self.output_pp_rank: # This process got outputs @@ -954,7 +969,7 @@ def _loglikelihood_tokens( dist.all_gather(gathered_out, out, group=self.parallel_context.tp_pg, async_op=False) out = torch.cat(gathered_out, dim=-1) - out = out.transpose(0, 1) # [batch, seq_length, vocab] + out = out.view(*batch_model.input_ids.shape, -1) # [batch, seq_length, vocab] multi_logits = F.log_softmax(out, dim=-1) # [batch, padding_length, vocab] logits_sum = [] diff --git a/src/lighteval/pipeline.py b/src/lighteval/pipeline.py index fd560bbe2..83ab89b68 100644 --- a/src/lighteval/pipeline.py +++ b/src/lighteval/pipeline.py @@ -156,11 +156,7 @@ def __init__( self.accelerator, self.parallel_context = self._init_parallelism_manager() self.model = self._init_model(model_config, model) - generation_parameters = ( - asdict(model_config.generation_parameters) - if model_config and hasattr(model_config, "generation_parameters") - else {} - ) + generation_parameters = asdict(model_config.generation_parameters) if model_config else {} self.evaluation_tracker.general_config_logger.log_model_info(generation_parameters, self.model.model_info) self._init_tasks_and_requests(tasks=tasks) @@ -196,7 +192,7 @@ def _init_model(self, model_config, model): checkpoint_path=os.path.dirname(self.pipeline_parameters.nanotron_checkpoint_path) if self.pipeline_parameters.nanotron_checkpoint_path else "", - nanotron_config=self.model_config, + nanotron_config=model_config, parallel_context=self.parallel_context, debug_one_layer_model=False, model_class=None, diff --git a/src/lighteval/tasks/lighteval_task.py b/src/lighteval/tasks/lighteval_task.py index 3420480b2..e7761a206 100644 --- a/src/lighteval/tasks/lighteval_task.py +++ b/src/lighteval/tasks/lighteval_task.py @@ -107,6 +107,7 @@ class LightevalTaskConfig: few_shots_select: Optional[str] = None # Generation args + output_regex: Optional[str] = None generation_size: Optional[int] = None generation_grammar: Optional[TextGenerationInputGrammarType] = None stop_sequence: Optional[ListLike[str]] = None @@ -120,6 +121,7 @@ class LightevalTaskConfig: 
must_remove_duplicate_docs: bool = False version: int = 0 + frozen: bool = False def __post_init__(self): # If we got a Metrics enums instead of a Metric, we convert diff --git a/src/lighteval/tasks/multilingual/tasks.py b/src/lighteval/tasks/multilingual/tasks.py index 3d92a71e2..d697c00bc 100644 --- a/src/lighteval/tasks/multilingual/tasks.py +++ b/src/lighteval/tasks/multilingual/tasks.py @@ -2874,7 +2874,7 @@ ), suite=("lighteval",), hf_repo="INK-USC/xcsr", - hf_subset=f"X-CSQA-{standardize_tag(language.value)}", + hf_subset=f"X-CSQA-{standardize_tag(language.value) if language != Language.JAPANESE else 'jap'}", hf_filter=lambda x: all( len(x["question"]["choices"]["text"][i].strip()) > 0 for i in range(len(x["question"]["choices"]["text"])) ), @@ -3426,7 +3426,7 @@ prompt_function=get_mcq_prompt_function(language, partial(xcodah_adapter, language), formulation=formulation), suite=("lighteval",), hf_repo="INK-USC/xcsr", - hf_subset=f"X-CODAH-{standardize_tag(language.value)}", + hf_subset=f"X-CODAH-{standardize_tag(language.value) if language != Language.JAPANESE else 'jap'}", evaluation_splits=("validation",), hf_avail_splits=["validation"], metric=get_metrics_for_formulation( @@ -3533,7 +3533,7 @@ language, partial(winogrand_adapter, language), formulation=formulation ), hf_repo="Muennighoff/xwinograd", - hf_subset=standardize_tag(language.value), + hf_subset=standardize_tag(language.value) if language != Language.JAPANESE else "jp", evaluation_splits=("test",), hf_avail_splits=["test"], metric=[ diff --git a/src/lighteval/tasks/templates/multichoice.py b/src/lighteval/tasks/templates/multichoice.py index daa8fffd2..92808488e 100644 --- a/src/lighteval/tasks/templates/multichoice.py +++ b/src/lighteval/tasks/templates/multichoice.py @@ -136,7 +136,7 @@ def prompt_fn(line, task_name: str): context = f"{capitalize(fix_ending_punct(context_val, translation_literals))}\n" if context_val else "" question = capitalize(fix_ending_punct(mcq_input["question"], translation_literals)) - answers = [capitalize(fix_ending_punct(answer, translation_literals)) for answer in mcq_input["choices"]] + answers = [capitalize(fix_ending_punct(str(answer), translation_literals)) for answer in mcq_input["choices"]] options = build_choices(answers, formulation, translation_literals) options = f"{options}\n" if options else "" From 826bd83ca0cfe3a8b6b1ab9572bbe0a4368390de Mon Sep 17 00:00:00 2001 From: anton Date: Wed, 23 Apr 2025 18:33:12 +0200 Subject: [PATCH 07/13] ml task fixes --- src/lighteval/tasks/multilingual/tasks.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/lighteval/tasks/multilingual/tasks.py b/src/lighteval/tasks/multilingual/tasks.py index d697c00bc..cf5525b8a 100644 --- a/src/lighteval/tasks/multilingual/tasks.py +++ b/src/lighteval/tasks/multilingual/tasks.py @@ -1734,7 +1734,8 @@ lambda subset, sensitivity_label, x: x["subject"].lower() == subset and ( sensitivity_label == "ALL" or sensitivity_label in x["cultural_sensitivity_label"].replace("-", "UNK") - ), + ) + and all(x[f"option_{opt}"] is not None and x[f"option_{opt}"].strip() for opt in "abcd"), subset, sensitivity_label, ), @@ -3164,6 +3165,7 @@ stop_sequence=("\n",), ) for language in [ + Language.ENGLISH, Language.SPANISH, Language.FRENCH, Language.GERMAN, From edd44a477ae6ec6271a333c3734430192def8ff0 Mon Sep 17 00:00:00 2001 From: anton Date: Wed, 23 Apr 2025 18:43:58 +0200 Subject: [PATCH 08/13] style --- src/lighteval/data.py | 2 +- src/lighteval/pipeline.py | 2 +- 2 files changed, 2 insertions(+), 
2 deletions(-) diff --git a/src/lighteval/data.py b/src/lighteval/data.py index 65506c869..9587f02a7 100644 --- a/src/lighteval/data.py +++ b/src/lighteval/data.py @@ -115,7 +115,7 @@ def get_original_order(self, new_arr: list) -> list: if None in original_order: raise RuntimeError( - f"Some elements of the original order are None, meaning that len(new_arr) ({len(new_arr)}) != len(original_array) ({self.total_size}) original_order {len(self.original_order)} {len(new_arr)} {self.total_size} {len(self.sorted_data)}" + f"Some elements of the original order are None, meaning that len(new_arr) ({len(new_arr)}) != len(original_array) ({self.total_size})" ) return original_order diff --git a/src/lighteval/pipeline.py b/src/lighteval/pipeline.py index dec9c3cd0..83ab89b68 100644 --- a/src/lighteval/pipeline.py +++ b/src/lighteval/pipeline.py @@ -187,7 +187,7 @@ def _init_parallelism_manager(self): def _init_model(self, model_config, model): logger.info("--- LOADING MODEL ---") if model_config is not None: - if self.parallel_context: + if self.parallel_context: return NanotronLightevalModel( checkpoint_path=os.path.dirname(self.pipeline_parameters.nanotron_checkpoint_path) if self.pipeline_parameters.nanotron_checkpoint_path From 18880465bb4865ebc95ae4ad8c51a3f2b9d64d68 Mon Sep 17 00:00:00 2001 From: anton Date: Wed, 23 Apr 2025 18:56:30 +0200 Subject: [PATCH 09/13] style --- src/lighteval/data.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/lighteval/data.py b/src/lighteval/data.py index 9587f02a7..dd1fd5341 100644 --- a/src/lighteval/data.py +++ b/src/lighteval/data.py @@ -110,6 +110,7 @@ def get_original_order(self, new_arr: list) -> list: list: new_arr in the original order. """ original_order = [None] * self.total_size + for original_index, v in zip(self.original_order, new_arr): original_order[original_index] = v From c7c78052430998e77ae1f84e8844ad65d1a94259 Mon Sep 17 00:00:00 2001 From: anton Date: Mon, 28 Apr 2025 15:42:03 +0200 Subject: [PATCH 10/13] Revert "Merge branch 'main' into upd-nanotron" This reverts commit 39367664d07258997a19b48804531ab6f80b9404, reversing changes made to edd44a477ae6ec6271a333c3734430192def8ff0. 
--- .github/workflows/slow_tests.yaml | 47 -- .github/workflows/tests.yaml | 79 +-- README.md | 2 +- docs/source/_toctree.yml | 4 +- docs/source/adding-a-custom-task.mdx | 2 +- ...te-the-model-on-a-server-or-container.mdx} | 32 +- docs/source/package_reference/models.mdx | 4 + docs/source/quicktour.mdx | 10 +- docs/source/saving-and-reading-results.mdx | 14 - .../use-inference-providers-as-backend.mdx | 6 +- docs/source/use-litellm-as-backend.mdx | 27 +- docs/source/use-sglang-as-backend.mdx | 50 +- docs/source/use-vllm-as-backend.mdx | 96 +-- docs/source/using-the-python-api.mdx | 2 +- examples/custom_tasks_tests.py | 62 -- examples/model_configs/endpoint_model.yaml | 42 +- .../endpoint_model_reuse_existing.yaml | 5 + .../model_configs/inference_providers.yaml | 4 +- examples/model_configs/litellm_model.yaml | 11 +- examples/model_configs/peft_model.yaml | 27 +- examples/model_configs/quantized_model.yaml | 17 +- examples/model_configs/serverless_model.yaml | 3 + .../serverless_model_with_openai.yaml | 5 + .../model_configs/sglang_model_config.yaml | 39 +- examples/model_configs/tgi_model.yaml | 9 +- .../model_configs/transformers_model.yaml | 22 +- examples/model_configs/vllm_model_config.yaml | 36 +- examples/nanotron/custom_evaluation_tasks.py | 2 +- examples/test_tasks.txt | 27 - pyproject.toml | 12 +- src/lighteval/logging/evaluation_tracker.py | 28 - src/lighteval/logging/info_loggers.py | 3 + src/lighteval/main_accelerate.py | 88 ++- src/lighteval/main_endpoint.py | 181 ++++- src/lighteval/main_sglang.py | 36 +- src/lighteval/main_vllm.py | 42 +- src/lighteval/metrics/llm_as_judge.py | 109 +-- src/lighteval/metrics/metrics.py | 235 +------ src/lighteval/metrics/metrics_sample.py | 240 +------ src/lighteval/metrics/utils/judge_utils.py | 127 ---- src/lighteval/models/dummy/dummy_model.py | 8 +- .../models/endpoints/endpoint_model.py | 115 +++- .../endpoints/inference_providers_model.py | 21 +- src/lighteval/models/endpoints/tgi_model.py | 34 +- src/lighteval/models/litellm_model.py | 46 +- src/lighteval/models/model_input.py | 43 +- src/lighteval/models/model_loader.py | 48 +- src/lighteval/models/sglang/sglang_model.py | 64 +- .../models/transformers/adapter_model.py | 62 +- .../models/transformers/delta_model.py | 22 +- .../models/transformers/transformers_model.py | 538 +++++++++++---- src/lighteval/models/utils.py | 73 -- src/lighteval/models/vllm/vllm_model.py | 72 +- src/lighteval/pipeline.py | 23 +- src/lighteval/tasks/default_prompts.py | 61 -- src/lighteval/tasks/default_tasks.py | 330 ++++----- src/lighteval/tasks/extended/lcb/main.py | 29 +- src/lighteval/tasks/lighteval_task.py | 4 - src/lighteval/tasks/prompt_manager.py | 12 +- src/lighteval/tasks/registry.py | 1 - .../templates/utils/translation_literals.py | 24 +- src/lighteval/utils/utils.py | 17 +- tests/conftest.py | 23 - tests/models/endpoints/test_endpoint_model.py | 55 +- tests/models/endpoints/test_tgi_model.py | 5 +- tests/models/test_abstract_model.py | 3 +- tests/models/test_base_model.py | 6 +- ...lLM2-1.7B-Instruct-results-accelerate.json | 3 - .../SmolLM2-1.7B-Instruct-results-vllm.json | 3 - .../reference_scores/reference_task_scores.py | 639 ++++++++++++++++++ tests/reference_scores/reference_tasks.py | 114 ++++ tests/slow_tests/__init__.py | 21 - tests/slow_tests/test_accelerate_model.py | 105 --- tests/slow_tests/test_vllm_model.py | 103 --- tests/test_main.py | 126 ++++ 75 files changed, 2373 insertions(+), 2267 deletions(-) delete mode 100644 .github/workflows/slow_tests.yaml rename 
docs/source/{use-huggingface-inference-endpoints-or-tgi-as-backend.mdx => evaluate-the-model-on-a-server-or-container.mdx} (70%) delete mode 100644 examples/custom_tasks_tests.py create mode 100644 examples/model_configs/endpoint_model_reuse_existing.yaml create mode 100644 examples/model_configs/serverless_model.yaml create mode 100644 examples/model_configs/serverless_model_with_openai.yaml delete mode 100644 examples/test_tasks.txt delete mode 100644 src/lighteval/metrics/utils/judge_utils.py delete mode 100644 tests/conftest.py delete mode 100644 tests/reference_scores/SmolLM2-1.7B-Instruct-results-accelerate.json delete mode 100644 tests/reference_scores/SmolLM2-1.7B-Instruct-results-vllm.json create mode 100644 tests/reference_scores/reference_task_scores.py create mode 100644 tests/reference_scores/reference_tasks.py delete mode 100644 tests/slow_tests/__init__.py delete mode 100644 tests/slow_tests/test_accelerate_model.py delete mode 100644 tests/slow_tests/test_vllm_model.py create mode 100644 tests/test_main.py diff --git a/.github/workflows/slow_tests.yaml b/.github/workflows/slow_tests.yaml deleted file mode 100644 index 6ca42ce43..000000000 --- a/.github/workflows/slow_tests.yaml +++ /dev/null @@ -1,47 +0,0 @@ -name: Slow end to end tests - -on: - push: - branches: - - main - - v*-release - pull_request: - branches: - - main - -jobs: - run_tests: - name: Run tests - runs-on: 'aws-g4dn-2xlarge-use1-public-80' - steps: - - name: Install Git LFS - run: | - if ! command -v git-lfs &> /dev/null; then - echo "Installing Git LFS..." - sudo apt-get update && sudo apt-get install -y git-lfs - git lfs install - else - echo "Git LFS already installed." - fi - - - name: Checkout repository - uses: actions/checkout@v4 - with: - lfs: true - - - name: Install uv - uses: astral-sh/setup-uv@v5 - with: - enable-cache: true - - - name: Install the project - run: uv sync --extra dev - - - name: Ensure cache directories exist - run: mkdir -p cache/models cache/datasets - - - name: Run tests - env: - HF_HOME: "cache/models" - HF_DATASETS_CACHE: "cache/datasets" - run: uv run pytest --disable-pytest-warnings --runslow tests/slow_tests diff --git a/.github/workflows/tests.yaml b/.github/workflows/tests.yaml index e1a2cad51..5f270d833 100644 --- a/.github/workflows/tests.yaml +++ b/.github/workflows/tests.yaml @@ -11,49 +11,36 @@ on: jobs: run_tests: - name: Run tests - runs-on: ubuntu-latest - steps: - - name: Checkout repository - uses: actions/checkout@v4 - with: - lfs: true - - - name: Cache Hugging Face models - uses: actions/cache@v4 - with: - path: cache/models - key: hf-models-${{ runner.os }}-${{ github.ref }} - restore-keys: hf-models-${{ runner.os }}- - - - name: Cache Hugging Face datasets - uses: actions/cache@v4 - with: - path: cache/datasets - key: hf-datasets-${{ runner.os }}-${{ github.ref }} - restore-keys: hf-datasets-${{ runner.os }}- - - - name: Cache uv virtual environment - uses: actions/cache@v4 - with: - path: .venv - key: uv-env-${{ runner.os }}-${{ hashFiles('pyproject.toml') }} - restore-keys: uv-env-${{ runner.os }}- - - - name: Install uv - uses: astral-sh/setup-uv@v5 - with: - enable-cache: true - - - name: Install the project - run: uv sync --extra dev - - - name: Ensure cache directories exist - run: mkdir -p cache/models cache/datasets - - - name: Run tests - env: - HF_TEST_TOKEN: ${{ secrets.HF_TEST_TOKEN }} - HF_HOME: "cache/models" - HF_DATASETS_CACHE: "cache/datasets" - run: uv run pytest -x --disable-pytest-warnings + name: Run tests + runs-on: ubuntu-latest + 
steps: + - name: Checkout code + uses: actions/checkout@v3 + with: + lfs: 'true' + - name: Setup Python environment + uses: actions/setup-python@v4 + with: + python-version: '3.10' + cache: 'pip' + - name: Install lighteval in editable mode + run: | + pip install -e .[dev,extended_tasks,multilingual,litellm] + - name: Get cached files + uses: actions/cache@v4 + id: get-cache + with: + path: "cache" + key: test-cache-HF + - name: Test + env: + HF_TEST_TOKEN: ${{ secrets.HF_TEST_TOKEN }} + HF_HOME: "cache/models" + HF_DATASETS_CACHE: "cache/datasets" + run: | # PYTHONPATH="${PYTHONPATH}:src" HF_DATASETS_CACHE="cache/datasets" HF_HOME="cache/models" + python -m pytest -x --disable-pytest-warnings + - name: Write cache + uses: actions/cache@v4 + with: + path: "cache" + key: test-cache-HF diff --git a/README.md b/README.md index bab41e036..42f6dcaf8 100644 --- a/README.md +++ b/README.md @@ -87,7 +87,7 @@ Here’s a quick command to evaluate using the Accelerate backend: ```shell lighteval accelerate \ - "model_name=gpt2" \ + "pretrained=gpt2" \ "leaderboard|truthfulqa:mc|0|0" ``` diff --git a/docs/source/_toctree.yml b/docs/source/_toctree.yml index 8a1c24c8e..6bd2196d5 100644 --- a/docs/source/_toctree.yml +++ b/docs/source/_toctree.yml @@ -23,8 +23,8 @@ title: Use vllm as backend - local: use-sglang-as-backend title: Use SGLang as backend - - local: use-huggingface-inference-endpoints-or-tgi-as-backend - title: Use Hugging Face inference endpoints or TGI as backend + - local: evaluate-the-model-on-a-server-or-container + title: Evaluate on Server - local: contributing-to-multilingual-evaluations title: Contributing to multilingual evaluations title: Guides diff --git a/docs/source/adding-a-custom-task.mdx b/docs/source/adding-a-custom-task.mdx index 3c34c5bfa..e5160024d 100644 --- a/docs/source/adding-a-custom-task.mdx +++ b/docs/source/adding-a-custom-task.mdx @@ -171,7 +171,7 @@ Once your file is created you can then run the evaluation with the following com ```bash lighteval accelerate \ - "model_name=HuggingFaceH4/zephyr-7b-beta" \ + "pretrained=HuggingFaceH4/zephyr-7b-beta" \ "community|{custom_task}|{fewshots}|{truncate_few_shot}" \ --custom-tasks {path_to_your_custom_task_file} ``` diff --git a/docs/source/use-huggingface-inference-endpoints-or-tgi-as-backend.mdx b/docs/source/evaluate-the-model-on-a-server-or-container.mdx similarity index 70% rename from docs/source/use-huggingface-inference-endpoints-or-tgi-as-backend.mdx rename to docs/source/evaluate-the-model-on-a-server-or-container.mdx index 232bcd73a..23c658b4e 100644 --- a/docs/source/use-huggingface-inference-endpoints-or-tgi-as-backend.mdx +++ b/docs/source/evaluate-the-model-on-a-server-or-container.mdx @@ -25,12 +25,15 @@ be deleted afterwards). 
__configuration file example:__ ```yaml -model_parameters: - reuse_existing: false # if true, ignore all params in instance, and don't delete the endpoint after evaluation -# endpoint_name: "llama-2-7B-lighteval" # needs to be lower case without special characters +model: + base_params: + # Pass either model_name, or endpoint_name and true reuse_existing + # endpoint_name: "llama-2-7B-lighteval" # needs to be lower case without special characters + # reuse_existing: true # defaults to false; if true, ignore all params in instance, and don't delete the endpoint after evaluation model_name: "meta-llama/Llama-2-7b-hf" - revision: "main" # defaults to "main" + # revision: "main" # defaults to "main" dtype: "float16" # can be any of "awq", "eetq", "gptq", "4bit' or "8bit" (will use bitsandbytes), "bfloat16" or "float16" + instance: accelerator: "gpu" region: "eu-west-1" vendor: "aws" @@ -41,7 +44,7 @@ model_parameters: namespace: null # The namespace under which to launch the endpoint. Defaults to the current user's namespace image_url: null # Optionally specify the docker image to use when launching the endpoint model. E.g., launching models with later releases of the TGI container with support for newer models. env_vars: - null # Optional environment variables to include when launching the endpoint. e.g., `MAX_INPUT_LENGTH: 2048` + null # Optional environment variables to include when launching the endpoint. e.g., `MAX_INPUT_LENGTH: 2048` ``` ### Text Generation Inference (TGI) @@ -52,8 +55,25 @@ serverless inference. __configuration file example:__ ```yaml -model_parameters: +model: + instance: inference_server_address: "" inference_server_auth: null model_id: null # Optional, only required if the TGI container was launched with model_id pointing to a local directory ``` + +### OpenAI API + +Lighteval also supports evaluating models on the OpenAI API. To do so you need to set your OpenAI API key in the environment variable. + +```bash +export OPENAI_API_KEY={your_key} +``` + +And then run the following command: + +```bash +lighteval endpoint openai \ + {model-name} \ + +``` diff --git a/docs/source/package_reference/models.mdx b/docs/source/package_reference/models.mdx index 837ea4340..9feed4652 100644 --- a/docs/source/package_reference/models.mdx +++ b/docs/source/package_reference/models.mdx @@ -31,6 +31,10 @@ ### Open AI Models [[autodoc]] models.endpoints.openai_model.OpenAIClient +## Nanotron Model +### NanotronLightevalModel +[[autodoc]] models.nanotron.nanotron_model.NanotronLightevalModel + ## VLLM Model ### VLLMModel [[autodoc]] models.vllm.vllm_model.VLLMModelConfig diff --git a/docs/source/quicktour.mdx b/docs/source/quicktour.mdx index de5791b46..b6ec6c7d8 100644 --- a/docs/source/quicktour.mdx +++ b/docs/source/quicktour.mdx @@ -27,7 +27,7 @@ To evaluate `GPT-2` on the Truthful QA benchmark with [🤗 ```bash lighteval accelerate \ - "model_name=openai-community/gpt2" \ + "pretrained=gpt2" \ "leaderboard|truthfulqa:mc|0|0" ``` @@ -59,7 +59,7 @@ When specifying a path to file, it should start with `./`. 
```bash lighteval accelerate \ - "model_name=openai-community/gpt2" \ + "pretrained=gpt2" \ ./path/to/lighteval/examples/tasks/recommended_set.txt # or, e.g., "leaderboard|truthfulqa:mc|0|0|,leaderboard|gsm8k|3|1" ``` @@ -79,7 +79,7 @@ You can then evaluate a model using data parallelism on 8 GPUs like follows: ```bash accelerate launch --multi_gpu --num_processes=8 -m \ lighteval accelerate \ - "model_name=openai-community/gpt2" \ + "pretrained=gpt2" \ "leaderboard|truthfulqa:mc|0|0" ``` @@ -92,7 +92,7 @@ To evaluate a model using pipeline parallelism on 2 or more GPUs, run: ```bash lighteval accelerate \ - "model_name=openai-community/gpt2,model_parallel=True" \ + "pretrained=gpt2,model_parallel=True" \ "leaderboard|truthfulqa:mc|0|0" ``` @@ -129,7 +129,7 @@ accelerate). - **add_special_tokens** (bool, optional, defaults to True): Whether to add special tokens to the input sequences. If `None`, the default value will be set to `True` for seq2seq models (e.g. T5) and `False` for causal models. -- **model_parallel** (bool, optional, defaults to None): +- **model_parallel** (bool, optional, defaults to False): True/False: force to use or not the `accelerate` library to load a large model across multiple devices. Default: None which corresponds to comparing the number of processes with diff --git a/docs/source/saving-and-reading-results.mdx b/docs/source/saving-and-reading-results.mdx index 525846ac9..993d7577b 100644 --- a/docs/source/saving-and-reading-results.mdx +++ b/docs/source/saving-and-reading-results.mdx @@ -31,20 +31,6 @@ This will create a Tensorboard dashboard in a HF org set with the `--results-org option. -## Pushing results to WandB - -You can push the results to WandB by setting `--wandb`. This will init a WandB -run and log the results. - -Wandb args need to be set in your env variables. - -``` -export WANDB_PROJECT="lighteval" -``` - -You can find a list of variable in the [wandb documentation](https://docs.wandb.ai/guides/track/environment-variables/). 
- - ## How to load and investigate details ### Load from local detail files diff --git a/docs/source/use-inference-providers-as-backend.mdx b/docs/source/use-inference-providers-as-backend.mdx index 8f125a00b..0cf69f7cf 100644 --- a/docs/source/use-inference-providers-as-backend.mdx +++ b/docs/source/use-inference-providers-as-backend.mdx @@ -11,7 +11,7 @@ Lighteval allows to use Hugging Face's Inference Providers to evaluate llms on s ```bash lighteval endpoint inference-providers \ - "model_name=deepseek-ai/DeepSeek-R1,provider=hf-inference" \ + "model=deepseek-ai/DeepSeek-R1,provider=hf-inference" \ "lighteval|gsm8k|0|0" ``` @@ -28,13 +28,13 @@ lighteval endpoint inference-providers \ with the following config file: ```yaml -model_parameters: +model: model_name: "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B" provider: "novita" timeout: null proxies: null parallel_calls_count: 10 - generation_parameters: + generation: temperature: 0.8 top_k: 10 max_new_tokens: 10000 diff --git a/docs/source/use-litellm-as-backend.mdx b/docs/source/use-litellm-as-backend.mdx index 783e9bcd9..dfebf2103 100644 --- a/docs/source/use-litellm-as-backend.mdx +++ b/docs/source/use-litellm-as-backend.mdx @@ -10,14 +10,10 @@ Documentation for available APIs and compatible endpoints can be found [here](ht ```bash lighteval endpoint litellm \ - "provider=openai,model_name=gpt-3.5-turbo" \ - "lighteval|gsm8k|0|0" \ - --use-chat-template + "gpt-3.5-turbo" \ + "lighteval|gsm8k|0|0" ``` -> [!WARNING] -> `--use-chat-template` is required for litellm to work properly. - ## Using a config file Litellm allows generation with any OpenAI compatible endpoint, for example you @@ -26,16 +22,17 @@ can evaluate a model running on a local vllm server. To do so you will need to use a config file like so: ```yaml -model_parameters: +model: + base_params: model_name: "openai/deepseek-ai/DeepSeek-R1-Distill-Qwen-32B" base_url: "URL OF THE ENDPOINT YOU WANT TO USE" api_key: "" # remove or keep empty as needed - generation_parameters: - temperature: 0.5 - max_new_tokens: 256 - stop_tokens: [""] - top_p: 0.9 - seed: 0 - repetition_penalty: 1.0 - frequency_penalty: 0.0 + generation: + temperature: 0.5 + max_new_tokens: 256 + stop_tokens: [""] + top_p: 0.9 + seed: 0 + repetition_penalty: 1.0 + frequency_penalty: 0.0 ``` diff --git a/docs/source/use-sglang-as-backend.mdx b/docs/source/use-sglang-as-backend.mdx index e9e629eab..595e4cfb9 100644 --- a/docs/source/use-sglang-as-backend.mdx +++ b/docs/source/use-sglang-as-backend.mdx @@ -5,7 +5,7 @@ To use, simply change the `model_args` to reflect the arguments you want to pass ```bash lighteval sglang \ - "model_name=HuggingFaceH4/zephyr-7b-beta,dtype=float16" \ + "pretrained=HuggingFaceH4/zephyr-7b-beta,dtype=float16" \ "leaderboard|truthfulqa:mc|0|0" ``` @@ -17,7 +17,7 @@ For example if you have 4 GPUs you can split it across using `tp_size`: ```bash lighteval sglang \ - "model_name=HuggingFaceH4/zephyr-7b-beta,dtype=float16,tp_size=4" \ + "pretrained=HuggingFaceH4/zephyr-7b-beta,dtype=float16,tp_size=4" \ "leaderboard|truthfulqa:mc|0|0" ``` @@ -25,7 +25,7 @@ Or, if your model fits on a single GPU, you can use `dp_size` to speed up the ev ```bash lighteval sglang \ - "model_name=HuggingFaceH4/zephyr-7b-beta,dtype=float16,dp_size=4" \ + "pretrained=HuggingFaceH4/zephyr-7b-beta,dtype=float16,dp_size=4" \ "leaderboard|truthfulqa:mc|0|0" ``` @@ -40,38 +40,20 @@ lighteval sglang \ "leaderboard|truthfulqa:mc|0|0" ``` -> [!TIP] -> Documentation for the config file of sglang can be found 
[here](https://docs.sglang.ai/backend/server_arguments.html) - ```yaml -model_parameters: - model_name: "HuggingFaceTB/SmolLM-1.7B-Instruct" - dtype: "auto" - tp_size: 1 - dp_size: 1 - context_length: null - random_seed: 1 - trust_remote_code: False - use_chat_template: False - device: "cuda" - skip_tokenizer_init: False - kv_cache_dtype: "auto" - add_special_tokens: True - pairwise_tokenization: False - sampling_backend: null - attention_backend: null - mem_fraction_static: 0.8 - chunked_prefill_size: 4096 - generation_parameters: - max_new_tokens: 1024 - min_new_tokens: 0 - temperature: 1.0 - top_k: 50 - min_p: 0.0 - top_p: 1.0 - presence_penalty: 0.0 - repetition_penalty: 1.0 - frequency_penalty: 0.0 +model: # Model specific parameters + base_params: + model_args: "pretrained=HuggingFaceTB/SmolLM-1.7B,dtype=float16,chunked_prefill_size=4096,mem_fraction_static=0.9" # Model args that you would pass in the command line + generation: # Generation specific parameters + temperature: 0.3 + repetition_penalty: 1.0 + frequency_penalty: 0.0 + presence_penalty: 0.0 + top_k: -1 + min_p: 0.0 + top_p: 0.9 + max_new_tokens: 256 + stop_tokens: ["", ""] ``` > [!WARNING] diff --git a/docs/source/use-vllm-as-backend.mdx b/docs/source/use-vllm-as-backend.mdx index 51f7364c2..9d4bb8632 100644 --- a/docs/source/use-vllm-as-backend.mdx +++ b/docs/source/use-vllm-as-backend.mdx @@ -3,13 +3,9 @@ Lighteval allows you to use `vllm` as backend allowing great speedups. To use, simply change the `model_args` to reflect the arguments you want to pass to vllm. - -> [!TIP] -> Documentation for vllm engine args can be found [here](https://docs.vllm.ai/en/latest/serving/engine_args.html) - ```bash lighteval vllm \ - "model_name=HuggingFaceH4/zephyr-7b-beta,dtype=float16" \ + "pretrained=HuggingFaceH4/zephyr-7b-beta,dtype=float16" \ "leaderboard|truthfulqa:mc|0|0" ``` @@ -21,7 +17,7 @@ For example if you have 4 GPUs you can split it across using `tensor_parallelism ```bash export VLLM_WORKER_MULTIPROC_METHOD=spawn && lighteval vllm \ - "model_name=HuggingFaceH4/zephyr-7b-beta,dtype=float16,tensor_parallel_size=4" \ + "pretrained=HuggingFaceH4/zephyr-7b-beta,dtype=float16,tensor_parallel_size=4" \ "leaderboard|truthfulqa:mc|0|0" ``` @@ -29,7 +25,7 @@ Or, if your model fits on a single GPU, you can use `data_parallelism` to speed ```bash lighteval vllm \ - "model_name=HuggingFaceH4/zephyr-7b-beta,dtype=float16,data_parallel_size=4" \ + "pretrained=HuggingFaceH4/zephyr-7b-beta,dtype=float16,data_parallel_size=4" \ "leaderboard|truthfulqa:mc|0|0" ``` @@ -45,35 +41,18 @@ lighteval vllm \ ``` ```yaml -model_parameters: - model_name: "HuggingFaceTB/SmolLM-1.7B-Instruct" - revision: "main" - dtype: "bfloat16" - tensor_parallel_size: 1 - data_parallel_size: 1 - pipeline_parallel_size: 1 - gpu_memory_utilization: 0.9 - max_model_length: 2048 - swap_space: 4 - seed: 1 - trust_remote_code: True - use_chat_template: True - add_special_tokens: True - multichoice_continuations_start_space: True - pairwise_tokenization: True - subfolder: null - generation_parameters: - presence_penalty: 0.0 - repetition_penalty: 1.0 - frequency_penalty: 0.0 - temperature: 1.0 - top_k: 50 - min_p: 0.0 - top_p: 1.0 - seed: 42 - stop_tokens: null - max_new_tokens: 1024 - min_new_tokens: 0 +model: # Model specific parameters + base_params: + model_args: "pretrained=HuggingFaceTB/SmolLM-1.7B,revision=main,dtype=bfloat16" # Model args that you would pass in the command line + generation: # Generation specific parameters + temperature: 0.3 + 
repetition_penalty: 1.0 + frequency_penalty: 0.0 + presence_penalty: 0.0 + seed: 42 + top_k: 0 + min_p: 0.0 + top_p: 0.9 ``` > [!WARNING] @@ -87,38 +66,21 @@ For special kinds of metrics like `Pass@K` or LiveCodeBench's `codegen` metric, generations. This can be done in the `yaml` file in the following way: ```yaml -model_parameters: - model_name: "HuggingFaceTB/SmolLM-1.7B-Instruct" - revision: "main" - dtype: "bfloat16" - tensor_parallel_size: 1 - data_parallel_size: 1 - pipeline_parallel_size: 1 - gpu_memory_utilization: 0.9 - max_model_length: 2048 - swap_space: 4 - seed: 1 - trust_remote_code: True - use_chat_template: True - add_special_tokens: True - multichoice_continuations_start_space: True - pairwise_tokenization: True - subfolder: null - generation_parameters: - presence_penalty: 0.0 - repetition_penalty: 1.0 - frequency_penalty: 0.0 - temperature: 1.0 - top_k: 50 - min_p: 0.0 - top_p: 1.0 - seed: 42 - stop_tokens: null - max_new_tokens: 1024 - min_new_tokens: 0 -metric_options: # Optional metric arguments +model: # Model specific parameters + base_params: + model_args: "pretrained=HuggingFaceTB/SmolLM-1.7B,revision=main,dtype=bfloat16" # Model args that you would pass in the command line + generation: # Generation specific parameters + temperature: 0.3 + repetition_penalty: 1.0 + frequency_penalty: 0.0 + presence_penalty: 0.0 + seed: 42 + top_k: 0 + min_p: 0.0 + top_p: 0.9 + metric_options: # Optional metric arguments codegen_pass@1:16: - num_samples: 16 + num_samples: 16 ``` An optional key `metric_options` can be passed in the yaml file, diff --git a/docs/source/using-the-python-api.mdx b/docs/source/using-the-python-api.mdx index e2ee0adae..9d8e3e63f 100644 --- a/docs/source/using-the-python-api.mdx +++ b/docs/source/using-the-python-api.mdx @@ -40,7 +40,7 @@ def main(): ) model_config = VLLMModelConfig( - model_name="HuggingFaceH4/zephyr-7b-beta", + pretrained="HuggingFaceH4/zephyr-7b-beta", dtype="float16", use_chat_template=True, ) diff --git a/examples/custom_tasks_tests.py b/examples/custom_tasks_tests.py deleted file mode 100644 index cf0175785..000000000 --- a/examples/custom_tasks_tests.py +++ /dev/null @@ -1,62 +0,0 @@ -# MIT License - -# Copyright (c) 2024 The HuggingFace Team - -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all -# copies or substantial portions of the Software. - -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. 
- -import lighteval.tasks.default_prompts as prompt -from lighteval.metrics.metrics import Metrics -from lighteval.tasks.lighteval_task import LightevalTaskConfig - - -gsm8k_test = LightevalTaskConfig( - name="gsm8k", - suite=["test"], - prompt_function=prompt.gsm8k, - hf_repo="gsm8k", - hf_subset="main", - hf_avail_splits=["train", "test"], - evaluation_splits=["test"], - few_shots_split=None, - few_shots_select="random_sampling_from_train", - generation_size=512, - metric=[Metrics.expr_gold_metric], - stop_sequence=None, - trust_dataset=True, - version=0, -) - -gpqa_diamond_test = LightevalTaskConfig( - name="gpqa:diamond", - suite=["test"], - prompt_function=prompt.gpqa_instruct, - hf_repo="Idavidrein/gpqa", - hf_subset="gpqa_diamond", - hf_avail_splits=["train"], - evaluation_splits=["train"], - few_shots_split=None, - few_shots_select=None, - generation_size=2048, - metric=[Metrics.gpqa_instruct_metric], - stop_sequence=[], # no stop sequence, will use eos token - trust_dataset=True, - version=0, -) - -TASKS_TABLE = [gsm8k_test, gpqa_diamond_test] diff --git a/examples/model_configs/endpoint_model.yaml b/examples/model_configs/endpoint_model.yaml index 0b17a0be9..08cb5fac3 100644 --- a/examples/model_configs/endpoint_model.yaml +++ b/examples/model_configs/endpoint_model.yaml @@ -1,22 +1,20 @@ -model_parameters: - reuse_existing: false # if true, ignore all params in instance, and don't delete the endpoint after evaluation - # endpoint_name: "llama-2-7B-lighteval" # needs to be lower case without special characters - - model_name: "meta-llama/Llama-2-7b-hf" - revision: "main" # defaults to "main" - dtype: "float16" # can be any of "awq", "eetq", "gptq", "4bit' or "8bit" (will use bitsandbytes), "bfloat16" or "float16" - accelerator: "gpu" - region: "eu-west-1" - vendor: "aws" - instance_type: "nvidia-a10g" - instance_size: "x1" - framework: "pytorch" - endpoint_type: "protected" - namespace: null # The namespace under which to launch the endpoint. Defaults to the current user's namespace - image_url: null # Optionally specify the docker image to use when launching the endpoint model. E.g., launching models with later releases of the TGI container with support for newer models. - env_vars: - null # Optional environment variables to include when launching the endpoint. e.g., `MAX_INPUT_LENGTH: 2048` - generation_parameters: - max_new_tokens: 256 # maximum number of tokens to generate - temperature: 0.2 - top_p: 0.9 +model: + base_params: + # Pass either model_name, or endpoint_name and true reuse_existing + # endpoint_name: "llama-2-7B-lighteval" # needs to be lower case without special characters + # reuse_existing: true # defaults to false; if true, ignore all params in instance, and don't delete the endpoint after evaluation + model_name: "meta-llama/Llama-2-7b-hf" + # revision: "main" # defaults to "main" + dtype: "float16" # can be any of "awq", "eetq", "gptq", "4bit' or "8bit" (will use bitsandbytes), "bfloat16" or "float16" + instance: + accelerator: "gpu" + region: "eu-west-1" + vendor: "aws" + instance_type: "nvidia-a10g" + instance_size: "x1" + framework: "pytorch" + endpoint_type: "protected" + namespace: null # The namespace under which to launch the endpoint. Defaults to the current user's namespace + image_url: null # Optionally specify the docker image to use when launching the endpoint model. E.g., launching models with later releases of the TGI container with support for newer models. 
+ env_vars: + null # Optional environment variables to include when launching the endpoint. e.g., `MAX_INPUT_LENGTH: 2048` diff --git a/examples/model_configs/endpoint_model_reuse_existing.yaml b/examples/model_configs/endpoint_model_reuse_existing.yaml new file mode 100644 index 000000000..8b47354d2 --- /dev/null +++ b/examples/model_configs/endpoint_model_reuse_existing.yaml @@ -0,0 +1,5 @@ +model: + base_params: + # Pass either model_name, or endpoint_name and true reuse_existing + endpoint_name: "llama-2-7B-lighteval" # needs to be lower case without special characters + reuse_existing: true # defaults to false; if true, ignore all params in instance, and don't delete the endpoint after evaluation diff --git a/examples/model_configs/inference_providers.yaml b/examples/model_configs/inference_providers.yaml index c08185f49..4a6d3e5cb 100644 --- a/examples/model_configs/inference_providers.yaml +++ b/examples/model_configs/inference_providers.yaml @@ -1,10 +1,10 @@ -model_parameters: +model: model_name: "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B" provider: "novita" timeout: null proxies: null parallel_calls_count: 20 - generation_parameters: + generation: temperature: 0.8 top_k: 10 max_new_tokens: 10000 diff --git a/examples/model_configs/litellm_model.yaml b/examples/model_configs/litellm_model.yaml index b0cb25199..6d2b7a30f 100644 --- a/examples/model_configs/litellm_model.yaml +++ b/examples/model_configs/litellm_model.yaml @@ -1,10 +1,11 @@ -model_parameters: - model_name: "openai/deepseek-ai/DeepSeek-R1-Distill-Qwen-32B" - provider: "openai" - base_url: "https://router.huggingface.co/hf-inference/v1" - generation_parameters: +model: + base_params: + model_name: "openai/deepseek-ai/DeepSeek-R1-Distill-Qwen-32B" + base_url: "https://router.huggingface.co/hf-inference/v1" + generation: temperature: 0.5 max_new_tokens: 256 + stop_tokens: [""] top_p: 0.9 seed: 0 repetition_penalty: 1.0 diff --git a/examples/model_configs/peft_model.yaml b/examples/model_configs/peft_model.yaml index c6f604993..81205818a 100644 --- a/examples/model_configs/peft_model.yaml +++ b/examples/model_configs/peft_model.yaml @@ -1,16 +1,11 @@ -model_parameters: - model_name: "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B" # pretrained=model_name,trust_remote_code=boolean,revision=revision_to_use,model_parallel=True ... For a PEFT model, the pretrained model should be the one trained with PEFT and the base model below will contain the original model on which the adapters will be applied. - tokenizer: null # name of tokenier to use if defferent from the model's default - subfolder: null # subfolder in the model's directory to use - dtype: "float16" # Specifying the model to be loaded in 4 bit uses BitsAndBytesConfig. The other option is to use "8bit" quantization. - compile: true - revision: "main" # revision to use - trust_remote_code: true # Trust remote code - model_parallel: null # Model parallel - use_chat_template: true # Use chat template - max_length: 2048 # maximum length of the input text and the generated text - - # should go in generation - max_generation_toks: 256 # maximum number of tokens to generate - #use_chat_template: true # Use chat template - batch_size: 10 # batch size to use +model: + base_params: + model_args: "pretrained=predibase/customer_support,revision=main" # pretrained=model_name,trust_remote_code=boolean,revision=revision_to_use,model_parallel=True ... 
For a PEFT model, the pretrained model should be the one trained with PEFT and the base model below will contain the original model on which the adapters will be applied. + dtype: "4bit" # Specifying the model to be loaded in 4 bit uses BitsAndBytesConfig. The other option is to use "8bit" quantization. + compile: true + merged_weights: # Ignore this section if you are not using PEFT models + delta_weights: false # set to True if your model should be merged with a base model, also need to provide the base model name + adapter_weights: true # set to True if your model has been trained with peft, also need to provide the base model name + base_model: "mistralai/Mistral-7B-v0.1" # path to the base_model - needs to be specified only if delta_weights or adapter_weights is set to True + generation: + multichoice_continuations_start_space: null # If true/false, will force multiple choice continuations to start/not start with a space. If none, will do nothing diff --git a/examples/model_configs/quantized_model.yaml b/examples/model_configs/quantized_model.yaml index 676c92aaf..3bc6b2c37 100644 --- a/examples/model_configs/quantized_model.yaml +++ b/examples/model_configs/quantized_model.yaml @@ -1,6 +1,11 @@ -model_parameters: - model_name: "HuggingFaceH4/zephyr-7b-beta" # pretrained=model_name,trust_remote_code=boolean,revision=revision_to_use,model_parallel=True ... - revision: "main" # revision to use - dtype: "4bit" # Specifying the model to be loaded in 4 bit uses BitsAndBytesConfig. The other option is to use "8bit" quantization. - compile: true - batch_size: 1 # batch size to use +model: + base_params: + model_args: "pretrained=HuggingFaceH4/zephyr-7b-beta,revision=main" # pretrained=model_name,trust_remote_code=boolean,revision=revision_to_use,model_parallel=True ... + dtype: "4bit" # Specifying the model to be loaded in 4 bit uses BitsAndBytesConfig. The other option is to use "8bit" quantization. + compile: true + merged_weights: # Ignore this section if you are not using PEFT models + delta_weights: false # set to True if your model should be merged with a base model, also need to provide the base model name + adapter_weights: false # set to True if your model has been trained with peft, also need to provide the base model name + base_model: null # path to the base_model - needs to be specified only if delta_weights or adapter_weights is set to True + generation: + multichoice_continuations_start_space: null # If true/false, will force multiple choice continuations to start/not start with a space.
If none, will do nothing diff --git a/examples/model_configs/serverless_model.yaml b/examples/model_configs/serverless_model.yaml new file mode 100644 index 000000000..af1652e1e --- /dev/null +++ b/examples/model_configs/serverless_model.yaml @@ -0,0 +1,3 @@ +model: + base_params: + model_name: "meta-llama/Llama-3.1-8B-Instruct" #Qwen/Qwen2.5-14B" #Qwen/Qwen2.5-7B" diff --git a/examples/model_configs/serverless_model_with_openai.yaml b/examples/model_configs/serverless_model_with_openai.yaml new file mode 100644 index 000000000..0e6947dff --- /dev/null +++ b/examples/model_configs/serverless_model_with_openai.yaml @@ -0,0 +1,5 @@ +model: + model_name: "deepseek-ai/DeepSeek-R1" #meta-llama/Llama-3.1-8B-Instruct" #Qwen/Qwen2.5-14B" #Qwen/Qwen2.5-7B" +api: + base_url: "https://huggingface.co/api/inference-proxy/together" + api_key: "hf_" diff --git a/examples/model_configs/sglang_model_config.yaml b/examples/model_configs/sglang_model_config.yaml index 159847e12..2a980e3a8 100644 --- a/examples/model_configs/sglang_model_config.yaml +++ b/examples/model_configs/sglang_model_config.yaml @@ -1,30 +1,13 @@ -model_parameters: - model_name: "HuggingFaceTB/SmolLM-1.7B-Instruct" - dtype: "auto" - tp_size: 1 - dp_size: 1 - context_length: null - random_seed: 1 - trust_remote_code: False - use_chat_template: False - device: "cuda" - skip_tokenizer_init: False - kv_cache_dtype: "auto" - add_special_tokens: True - pairwise_tokenization: False - sampling_backend: null - attention_backend: null - mem_fraction_static: 0.8 - chunked_prefill_size: 4096 - generation_parameters: - max_new_tokens: 1024 - min_new_tokens: 0 - temperature: 1.0 - top_k: 50 - min_p: 0.0 - top_p: 1.0 - presence_penalty: 0.0 +model: + base_params: + model_args: "pretrained=HuggingFaceTB/SmolLM-1.7B,dtype=float16,chunked_prefill_size=4096,mem_fraction_static=0.9" + generation: + temperature: 0.3 repetition_penalty: 1.0 frequency_penalty: 0.0 -metrics_options: - yo: null + presence_penalty: 0.0 + top_k: -1 + min_p: 0.0 + top_p: 0.9 + max_new_tokens: 256 + stop_tokens: ["", ""] diff --git a/examples/model_configs/tgi_model.yaml b/examples/model_configs/tgi_model.yaml index 34dbaa831..8db5654d8 100644 --- a/examples/model_configs/tgi_model.yaml +++ b/examples/model_configs/tgi_model.yaml @@ -1,4 +1,5 @@ -model_parameters: - inference_server_address: "" - inference_server_auth: null - model_name: null # Optional, only required if the TGI container was launched with model_id pointing to a local directory +model: + instance: + inference_server_address: "" + inference_server_auth: null + model_id: null # Optional, only required if the TGI container was launched with model_id pointing to a local directory diff --git a/examples/model_configs/transformers_model.yaml b/examples/model_configs/transformers_model.yaml index 1687e108b..377e348b3 100644 --- a/examples/model_configs/transformers_model.yaml +++ b/examples/model_configs/transformers_model.yaml @@ -1,10 +1,12 @@ -model_parameters: - model_name: "HuggingFaceTB/SmolLM2-1.7B-Instruct" - revision: "57aa3c6599c53705406c648e7acca7e11dc45ea3" - dtype: "float16" - compile: false - model_parallel: false - multichoice_continuations_start_space: null # If true/false, will force multiple choice continuations to start/not start with a space. 
If none, will do nothing - generation_parameters: - temperature: 0.2 - top_p: 0.9 +model: + base_params: + model_args: "pretrained=HuggingFaceTB/SmolLM-1.7B,revision=main" # pretrained=model_name,trust_remote_code=boolean,revision=revision_to_use,model_parallel=True ... + dtype: "bfloat16" + compile: true + multichoice_continuations_start_space: null # If true/false, will force multiple choice continuations to start/not start with a space. If none, will do nothing + merged_weights: # Ignore this section if you are not using PEFT models + delta_weights: false # set to True of your model should be merged with a base model, also need to provide the base model name + adapter_weights: false # set to True of your model has been trained with peft, also need to provide the base model name + base_model: null # path to the base_model + generation: + temperature: 0.5 diff --git a/examples/model_configs/vllm_model_config.yaml b/examples/model_configs/vllm_model_config.yaml index fb1d27fb3..76c6ff9ad 100644 --- a/examples/model_configs/vllm_model_config.yaml +++ b/examples/model_configs/vllm_model_config.yaml @@ -1,33 +1,13 @@ -model_parameters: - model_name: "HuggingFaceTB/SmolLM2-1.7B-Instruct" - revision: "57aa3c6599c53705406c648e7acca7e11dc45ea3" - dtype: "float16" - tensor_parallel_size: 1 - data_parallel_size: 1 - pipeline_parallel_size: 1 - gpu_memory_utilization: 0.6 - max_model_length: null - swap_space: 4 - seed: 42 - trust_remote_code: False - use_chat_template: True - add_special_tokens: True - multichoice_continuations_start_space: False - pairwise_tokenization: False - subfolder: null - max_num_seqs: 1 - max_num_batched_tokens: 8192 - generation_parameters: - presence_penalty: 0.0 +model: + base_params: + model_args: "pretrained=HuggingFaceTB/SmolLM-1.7B-Instruct,revision=main,dtype=bfloat16" + generation: + temperature: 0.3 repetition_penalty: 1.0 frequency_penalty: 0.0 - temperature: 0.3 - top_k: null + presence_penalty: 0.0 + seed: 42 + top_k: -1 min_p: 0.0 top_p: 0.9 - seed: 42 - stop_tokens: null max_new_tokens: 2048 - min_new_tokens: 0 -metrics_options: - yo: null diff --git a/examples/nanotron/custom_evaluation_tasks.py b/examples/nanotron/custom_evaluation_tasks.py index 2fd85f69b..e4b1b5ca6 100644 --- a/examples/nanotron/custom_evaluation_tasks.py +++ b/examples/nanotron/custom_evaluation_tasks.py @@ -256,7 +256,7 @@ def __init__( self, name, prompt_function=prompt.math, - hf_repo="DigitalLearningGmbH/MATH-lighteval", + hf_repo="lighteval/MATH", hf_subset=None, metric=[Metrics.quasi_exact_match_math], hf_avail_splits=None, diff --git a/examples/test_tasks.txt b/examples/test_tasks.txt deleted file mode 100644 index 7666c79e4..000000000 --- a/examples/test_tasks.txt +++ /dev/null @@ -1,27 +0,0 @@ -leaderboard|arc:challenge|25|0 -leaderboard|truthfulqa:mc|0|0 -leaderboard|hellaswag|10|0 -leaderboard|mmlu:college_chemistry|5|0 -leaderboard|mmlu:us_foreign_policy|5|0 -lighteval|agieval:aqua-rat|0|0 -lighteval|agieval:logiqa-en|0|0 -lighteval|agieval:lsat-ar|0|0 -lighteval|agieval:lsat-lr|0|0 -lighteval|agieval:lsat-rc|0|0 -lighteval|agieval:sat-en-without-passage|0|0 -lighteval|agieval:sat-en|0|0 -lighteval|bigbench:causal_judgment|3|0 -lighteval|bigbench:date_understanding|3|0 -lighteval|bigbench:disambiguation_qa|3|0 -lighteval|bigbench:geometric_shapes|3|0 -lighteval|bigbench:logical_deduction_five_objects|3|0 -lighteval|bigbench:logical_deduction_seven_objects|3|0 -lighteval|bigbench:movie_recommendation|3|0 -lighteval|bigbench:navigate|3|0 -lighteval|bigbench:ruin_names|3|0 
-lighteval|bigbench:salient_translation_error_detection|3|0 -lighteval|bigbench:snarks|3|0 -lighteval|bigbench:temporal_sequences|3|0 -lighteval|bigbench:tracking_shuffled_objects_five_objects|3|0 -lighteval|bigbench:tracking_shuffled_objects_seven_objects|3|0 -test|gsm8k|0|1 diff --git a/pyproject.toml b/pyproject.toml index ba2e95130..ca0f4b0e6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -56,10 +56,10 @@ dependencies = [ # Base dependencies "transformers>=4.38.0", "accelerate", - "huggingface_hub[hf_xet]", + "huggingface_hub>=0.23.0", "torch>=2.0,<3.0", "GitPython>=3.1.41", # for logging - "datasets>=3.5.0", + "datasets>=2.14.0", "numpy<2", # pinned to avoid incompatibilities # Prettiness "typer", @@ -78,7 +78,6 @@ dependencies = [ "protobuf==3.20.*", # pinned for sentencepiece compat "pycountry", "fsspec>=2023.12.2", - "httpx == 0.27.2", ] [project.optional-dependencies] @@ -94,12 +93,12 @@ nanotron = [ tensorboardX = ["tensorboardX"] vllm = ["vllm>=0.7.0", "ray", "more_itertools"] quality = ["ruff==v0.2.2","pre-commit"] -tests = ["pytest==7.4.0","deepdiff"] -dev = ["lighteval[accelerate,quality,tests,multilingual,math,extended_tasks,vllm]"] +tests = ["pytest==7.4.0"] +dev = ["lighteval[accelerate,quality,tests,multilingual,math,extended_tasks]"] docs = ["hf-doc-builder", "watchdog"] extended_tasks = [ "langdetect", # ifeval - "openai==1.55.2", # llm as a judge using openai models + "openai", # llm as a judge using openai models "tiktoken" ] s3 = ["s3fs"] @@ -110,7 +109,6 @@ multilingual = [ "pyvi", # for vietnamese tokenizer ] math = ["latex2sympy2_extended==1.0.6"] -wandb = ["wandb"] [project.urls] Homepage = "https://github.com/huggingface/lighteval" diff --git a/src/lighteval/logging/evaluation_tracker.py b/src/lighteval/logging/evaluation_tracker.py index 71d734d53..7b812648e 100644 --- a/src/lighteval/logging/evaluation_tracker.py +++ b/src/lighteval/logging/evaluation_tracker.py @@ -126,7 +126,6 @@ def __init__( tensorboard_metric_prefix: str = "eval", public: bool = False, nanotron_run_info: "GeneralArgs" = None, - wandb: bool = False, ) -> None: """Creates all the necessary loggers for evaluation tracking.""" self.details_logger = DetailsLogger() @@ -146,7 +145,6 @@ def __init__( self.should_push_to_hub = push_to_hub self.should_save_details = save_details - self.wandb = wandb self.should_push_results_to_tensorboard = push_to_tensorboard self.tensorboard_repo = f"{hub_results_org}/tensorboard_logs" @@ -155,20 +153,6 @@ def __init__( self.public = public - if wandb is True: - import wandb - - self.wandb_project = os.environ.get("WANDB_PROJECT", None) - - if self.wandb_project is None: - raise ValueError("You need to specify the project name in wandb_args") - - wandb.login() - self.wandb_run = wandb.init( - project=self.wandb_project, - resume="allow", - ) - @property def results(self): config_general = asdict(self.general_config_logger) @@ -238,23 +222,11 @@ def save(self) -> None: results_dict=results_dict, ) - if self.wandb is True: - self.push_to_wandb( - results_dict=results_dict, - details_datasets=details_datasets, - ) - if self.should_push_results_to_tensorboard: self.push_to_tensorboard( results=self.metrics_logger.metric_aggregated, details=self.details_logger.compiled_details ) - def push_to_wandb(self, results_dict: dict, details_datasets: dict) -> None: - self.wandb_run.log( - {**results_dict["results"]}, - ) - self.wandb_run.finish() - def save_results(self, date_id: str, results_dict: dict): output_dir_results = Path(self.output_dir) / 
"results" / self.general_config_logger.model_name self.fs.mkdirs(output_dir_results, exist_ok=True) diff --git a/src/lighteval/logging/info_loggers.py b/src/lighteval/logging/info_loggers.py index a520fc95a..dc0aec67d 100644 --- a/src/lighteval/logging/info_loggers.py +++ b/src/lighteval/logging/info_loggers.py @@ -77,6 +77,7 @@ class GeneralConfigLogger: # general lighteval_sha: str = None num_fewshot_seeds: int = None + override_batch_size: int = None max_samples: int = None job_id: int = None start_time: float = None @@ -107,6 +108,7 @@ def __init__(self) -> None: def log_args_info( self, num_fewshot_seeds: int, + override_batch_size: Union[None, int], max_samples: Union[None, int], job_id: str, config: "Config" = None, @@ -128,6 +130,7 @@ def log_args_info( """ self.num_fewshot_seeds = num_fewshot_seeds + self.override_batch_size = override_batch_size self.max_samples = max_samples self.job_id = job_id self.config = config diff --git a/src/lighteval/main_accelerate.py b/src/lighteval/main_accelerate.py index 5a1fe28cf..3764cacdb 100644 --- a/src/lighteval/main_accelerate.py +++ b/src/lighteval/main_accelerate.py @@ -21,6 +21,7 @@ # SOFTWARE. import logging +import os from typing import Optional from typer import Argument, Option @@ -29,6 +30,9 @@ logger = logging.getLogger(__name__) +TOKEN = os.getenv("HF_TOKEN") +CACHE_DIR: str = os.getenv("HF_HOME") + HELP_PANEL_NAME_1 = "Common Parameters" HELP_PANEL_NAME_2 = "Logging Parameters" HELP_PANEL_NAME_3 = "Debug Parameters" @@ -85,17 +89,13 @@ def accelerate( # noqa C901 save_details: Annotated[ bool, Option(help="Save detailed, sample per sample, results.", rich_help_panel=HELP_PANEL_NAME_2) ] = False, - wandb: Annotated[ - bool, - Option( - help="Push results to wandb. This will only work if you have wandb installed and logged in. We use env variable to configure wandb. see here: https://docs.wandb.ai/guides/track/environment-variables/", - rich_help_panel=HELP_PANEL_NAME_2, - ), - ] = False, # === debug === max_samples: Annotated[ Optional[int], Option(help="Maximum number of samples to evaluate on.", rich_help_panel=HELP_PANEL_NAME_3) ] = None, + override_batch_size: Annotated[ + int, Option(help="Override batch size for evaluation.", rich_help_panel=HELP_PANEL_NAME_3) + ] = -1, job_id: Annotated[ int, Option(help="Optional job id for future reference.", rich_help_panel=HELP_PANEL_NAME_3) ] = 0, @@ -103,14 +103,23 @@ def accelerate( # noqa C901 """ Evaluate models using accelerate and transformers as backend. 
""" + from datetime import timedelta + + import torch import yaml + from accelerate import Accelerator, InitProcessGroupKwargs from lighteval.logging.evaluation_tracker import EvaluationTracker + from lighteval.models.model_input import GenerationParameters from lighteval.models.transformers.adapter_model import AdapterModelConfig from lighteval.models.transformers.delta_model import DeltaModelConfig - from lighteval.models.transformers.transformers_model import TransformersModelConfig - from lighteval.models.utils import ModelConfig - from lighteval.pipeline import ParallelismManager, Pipeline, PipelineParameters + from lighteval.models.transformers.transformers_model import BitsAndBytesConfig, TransformersModelConfig + from lighteval.pipeline import EnvConfig, ParallelismManager, Pipeline, PipelineParameters + + accelerator = Accelerator(kwargs_handlers=[InitProcessGroupKwargs(timeout=timedelta(seconds=3000))]) + cache_dir = CACHE_DIR + + env_config = EnvConfig(token=TOKEN, cache_dir=cache_dir) evaluation_tracker = EvaluationTracker( output_dir=output_dir, @@ -119,13 +128,14 @@ def accelerate( # noqa C901 push_to_tensorboard=push_to_tensorboard, public=public_run, hub_results_org=results_org, - wandb=wandb, ) pipeline_params = PipelineParameters( launcher_type=ParallelismManager.ACCELERATE, + env_config=env_config, job_id=job_id, dataset_loading_processes=dataset_loading_processes, custom_tasks_directory=custom_tasks, + override_batch_size=override_batch_size, num_fewshot_seeds=num_fewshot_seeds, max_samples=max_samples, use_chat_template=use_chat_template, @@ -133,21 +143,57 @@ def accelerate( # noqa C901 load_responses_from_details_date_id=load_responses_from_details_date_id, ) + # TODO (nathan): better handling of model_args if model_args.endswith(".yaml"): with open(model_args, "r") as f: - config = yaml.safe_load(f)["model_parameters"] - else: - # We extract the model args - config: dict = ModelConfig._parse_args(model_args) + config = yaml.safe_load(f)["model"] - config["use_chat_template"] = use_chat_template + # Creating optional quantization configuration + if config["base_params"]["dtype"] == "4bit": + quantization_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.float16) + elif config["base_params"]["dtype"] == "8bit": + quantization_config = BitsAndBytesConfig(load_in_8bit=True) + else: + quantization_config = None - if config.get("delta_weights", False): - model_config = DeltaModelConfig(**config) - elif config.get("adapter_weights", False): - model_config = AdapterModelConfig(**config) + # We extract the model args + args_dict = {k.split("=")[0]: k.split("=")[1] for k in config["base_params"]["model_args"].split(",")} + + args_dict["generation_parameters"] = GenerationParameters.from_dict(config) + + # We store the relevant other args + args_dict["base_model"] = config["merged_weights"]["base_model"] + args_dict["compile"] = bool(config["base_params"]["compile"]) + args_dict["dtype"] = config["base_params"]["dtype"] + args_dict["accelerator"] = accelerator + args_dict["quantization_config"] = quantization_config + args_dict["batch_size"] = override_batch_size + args_dict["multichoice_continuations_start_space"] = config["base_params"][ + "multichoice_continuations_start_space" + ] + args_dict["use_chat_template"] = use_chat_template + + # Keeping only non null params + args_dict = {k: v for k, v in args_dict.items() if v is not None} + + if config["merged_weights"].get("delta_weights", False): + if config["merged_weights"]["base_model"] is None: + 
raise ValueError("You need to specify a base model when using delta weights") + model_config = DeltaModelConfig(**args_dict) + elif config["merged_weights"].get("adapter_weights", False): + if config["merged_weights"]["base_model"] is None: + raise ValueError("You need to specify a base model when using adapter weights") + model_config = AdapterModelConfig(**args_dict) + elif config["merged_weights"]["base_model"] not in ["", None]: + raise ValueError("You can't specify a base model if you are not using delta/adapter weights") + else: + model_config = TransformersModelConfig(**args_dict) else: - model_config = TransformersModelConfig(**config) + model_args_dict: dict = {k.split("=")[0]: k.split("=")[1] if "=" in k else True for k in model_args.split(",")} + model_args_dict["accelerator"] = accelerator + model_args_dict["use_chat_template"] = use_chat_template + model_args_dict["compile"] = bool(model_args_dict["compile"]) if "compile" in model_args_dict else False + model_config = TransformersModelConfig(**model_args_dict) pipeline = Pipeline( tasks=tasks, diff --git a/src/lighteval/main_endpoint.py b/src/lighteval/main_endpoint.py index 371710497..5b473cae9 100644 --- a/src/lighteval/main_endpoint.py +++ b/src/lighteval/main_endpoint.py @@ -19,6 +19,7 @@ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. +import os from typing import Optional import typer @@ -29,12 +30,122 @@ app = typer.Typer() +TOKEN = os.getenv("HF_TOKEN") +CACHE_DIR: str = os.getenv("HF_HOME", "/scratch") + HELP_PANEL_NAME_1 = "Common Parameters" HELP_PANEL_NAME_2 = "Logging Parameters" HELP_PANEL_NAME_3 = "Debug Parameters" HELP_PANEL_NAME_4 = "Modeling Parameters" +@app.command(rich_help_panel="Evaluation Backends") +def openai( + # === general === + model_args: Annotated[ + str, + Argument( + help="Model name as a string (has to be available through the openai API) or path to yaml config file (see examples/model_configs/transformers_model.yaml)" + ), + ], + tasks: Annotated[str, Argument(help="Comma-separated list of tasks to evaluate on.")], + # === Common parameters === + system_prompt: Annotated[ + Optional[str], Option(help="Use system prompt for evaluation.", rich_help_panel=HELP_PANEL_NAME_4) + ] = None, + dataset_loading_processes: Annotated[ + int, Option(help="Number of processes to use for dataset loading.", rich_help_panel=HELP_PANEL_NAME_1) + ] = 1, + custom_tasks: Annotated[ + Optional[str], Option(help="Path to custom tasks directory.", rich_help_panel=HELP_PANEL_NAME_1) + ] = None, + cache_dir: Annotated[ + str, Option(help="Cache directory for datasets and models.", rich_help_panel=HELP_PANEL_NAME_1) + ] = CACHE_DIR, + num_fewshot_seeds: Annotated[ + int, Option(help="Number of seeds to use for few-shot evaluation.", rich_help_panel=HELP_PANEL_NAME_1) + ] = 1, + # === saving === + output_dir: Annotated[ + str, Option(help="Output directory for evaluation results.", rich_help_panel=HELP_PANEL_NAME_2) + ] = "results", + push_to_hub: Annotated[ + bool, Option(help="Push results to the huggingface hub.", rich_help_panel=HELP_PANEL_NAME_2) + ] = False, + push_to_tensorboard: Annotated[ + bool, Option(help="Push results to tensorboard.", rich_help_panel=HELP_PANEL_NAME_2) + ] = False, + public_run: Annotated[ + bool, Option(help="Push results and details to a public repo.", rich_help_panel=HELP_PANEL_NAME_2) + ] = False, + results_org: Annotated[ + Optional[str], 
Option(help="Organization to push results to.", rich_help_panel=HELP_PANEL_NAME_2) + ] = None, + save_details: Annotated[ + bool, Option(help="Save detailed, sample per sample, results.", rich_help_panel=HELP_PANEL_NAME_2) + ] = False, + # === debug === + max_samples: Annotated[ + Optional[int], Option(help="Maximum number of samples to evaluate on.", rich_help_panel=HELP_PANEL_NAME_3) + ] = None, + job_id: Annotated[ + int, Option(help="Optional job id for future reference.", rich_help_panel=HELP_PANEL_NAME_3) + ] = 0, +): + """ + Evaluate OPENAI models. + """ + from lighteval.logging.evaluation_tracker import EvaluationTracker + from lighteval.models.endpoints.openai_model import OpenAIModelConfig + from lighteval.pipeline import EnvConfig, ParallelismManager, Pipeline, PipelineParameters + + if model_args.endswith(".yaml"): + model_config = OpenAIModelConfig.from_path(model_args) + else: + model_config = OpenAIModelConfig(model=model_args) + + env_config = EnvConfig(token=TOKEN, cache_dir=cache_dir) + evaluation_tracker = EvaluationTracker( + output_dir=output_dir, + save_details=save_details, + push_to_hub=push_to_hub, + push_to_tensorboard=push_to_tensorboard, + public=public_run, + hub_results_org=results_org, + ) + + parallelism_manager = ParallelismManager.OPENAI + + pipeline_params = PipelineParameters( + launcher_type=parallelism_manager, + env_config=env_config, + job_id=job_id, + dataset_loading_processes=dataset_loading_processes, + custom_tasks_directory=custom_tasks, + override_batch_size=-1, # Cannot override batch size when using OpenAI + num_fewshot_seeds=num_fewshot_seeds, + max_samples=max_samples, + use_chat_template=False, # Cannot use chat template when using OpenAI + system_prompt=system_prompt, + ) + pipeline = Pipeline( + tasks=tasks, + pipeline_parameters=pipeline_params, + evaluation_tracker=evaluation_tracker, + model_config=model_config, + ) + + pipeline.evaluate() + + pipeline.show_results() + + results = pipeline.get_results() + + pipeline.save_and_push_results() + + return results + + @app.command(rich_help_panel="Evaluation Backends") def inference_endpoint( # === general === @@ -62,6 +173,9 @@ def inference_endpoint( custom_tasks: Annotated[ Optional[str], Option(help="Path to custom tasks directory.", rich_help_panel=HELP_PANEL_NAME_1) ] = None, + cache_dir: Annotated[ + str, Option(help="Cache directory for datasets and models.", rich_help_panel=HELP_PANEL_NAME_1) + ] = CACHE_DIR, num_fewshot_seeds: Annotated[ int, Option(help="Number of seeds to use for few-shot evaluation.", rich_help_panel=HELP_PANEL_NAME_1) ] = 1, @@ -87,17 +201,13 @@ def inference_endpoint( save_details: Annotated[ bool, Option(help="Save detailed, sample per sample, results.", rich_help_panel=HELP_PANEL_NAME_2) ] = False, - wandb: Annotated[ - bool, - Option( - help="Push results to wandb. This will only work if you have wandb installed and logged in. We use env variable to configure wandb. 
see here: https://docs.wandb.ai/guides/track/environment-variables/", - rich_help_panel=HELP_PANEL_NAME_2, - ), - ] = False, # === debug === max_samples: Annotated[ Optional[int], Option(help="Maximum number of samples to evaluate on.", rich_help_panel=HELP_PANEL_NAME_3) ] = None, + override_batch_size: Annotated[ + int, Option(help="Override batch size for evaluation.", rich_help_panel=HELP_PANEL_NAME_3) + ] = None, job_id: Annotated[ int, Option(help="Optional job id for future reference.", rich_help_panel=HELP_PANEL_NAME_3) ] = 0, @@ -107,8 +217,9 @@ def inference_endpoint( """ from lighteval.logging.evaluation_tracker import EvaluationTracker from lighteval.models.endpoints.endpoint_model import InferenceEndpointModelConfig, ServerlessEndpointModelConfig - from lighteval.pipeline import ParallelismManager, Pipeline, PipelineParameters + from lighteval.pipeline import EnvConfig, ParallelismManager, Pipeline, PipelineParameters + env_config = EnvConfig(token=TOKEN, cache_dir=cache_dir) evaluation_tracker = EvaluationTracker( output_dir=output_dir, save_details=save_details, @@ -116,11 +227,13 @@ def inference_endpoint( push_to_tensorboard=push_to_tensorboard, public=public_run, hub_results_org=results_org, - wandb=wandb, ) + # TODO (nathan): better handling of model_args + parallelism_manager = ParallelismManager.NONE # since we're using inference endpoints in remote + # Find a way to add this back if free_endpoint: model_config = ServerlessEndpointModelConfig.from_path(model_config_path) else: @@ -128,9 +241,11 @@ def inference_endpoint( pipeline_params = PipelineParameters( launcher_type=parallelism_manager, + env_config=env_config, job_id=job_id, dataset_loading_processes=dataset_loading_processes, custom_tasks_directory=custom_tasks, + override_batch_size=override_batch_size, num_fewshot_seeds=num_fewshot_seeds, max_samples=max_samples, use_chat_template=use_chat_template, @@ -175,6 +290,9 @@ def tgi( custom_tasks: Annotated[ Optional[str], Option(help="Path to custom tasks directory.", rich_help_panel=HELP_PANEL_NAME_1) ] = None, + cache_dir: Annotated[ + str, Option(help="Cache directory for datasets and models.", rich_help_panel=HELP_PANEL_NAME_1) + ] = CACHE_DIR, num_fewshot_seeds: Annotated[ int, Option(help="Number of seeds to use for few-shot evaluation.", rich_help_panel=HELP_PANEL_NAME_1) ] = 1, @@ -204,6 +322,9 @@ def tgi( max_samples: Annotated[ Optional[int], Option(help="Maximum number of samples to evaluate on.", rich_help_panel=HELP_PANEL_NAME_3) ] = None, + override_batch_size: Annotated[ + int, Option(help="Override batch size for evaluation.", rich_help_panel=HELP_PANEL_NAME_3) + ] = -1, job_id: Annotated[ int, Option(help="Optional job id for future reference.", rich_help_panel=HELP_PANEL_NAME_3) ] = 0, @@ -211,13 +332,11 @@ def tgi( """ Evaluate models using TGI as backend. 
""" - import yaml - from lighteval.logging.evaluation_tracker import EvaluationTracker from lighteval.models.endpoints.tgi_model import TGIModelConfig - from lighteval.models.model_input import GenerationParameters - from lighteval.pipeline import ParallelismManager, Pipeline, PipelineParameters + from lighteval.pipeline import EnvConfig, ParallelismManager, Pipeline, PipelineParameters + env_config = EnvConfig(token=TOKEN, cache_dir=cache_dir) evaluation_tracker = EvaluationTracker( output_dir=output_dir, save_details=save_details, @@ -227,19 +346,18 @@ def tgi( hub_results_org=results_org, ) + # TODO (nathan): better handling of model_args parallelism_manager = ParallelismManager.TGI - with open(model_config_path, "r") as f: - config = yaml.safe_load(f) - - generation_parameters = GenerationParameters(**config.get("generation", {})) - model_config = TGIModelConfig(**config["model"], generation_parameters=generation_parameters) + model_config = TGIModelConfig.from_path(model_config_path) pipeline_params = PipelineParameters( launcher_type=parallelism_manager, + env_config=env_config, job_id=job_id, dataset_loading_processes=dataset_loading_processes, custom_tasks_directory=custom_tasks, + override_batch_size=override_batch_size, num_fewshot_seeds=num_fewshot_seeds, max_samples=max_samples, use_chat_template=use_chat_template, @@ -287,6 +405,9 @@ def litellm( custom_tasks: Annotated[ Optional[str], Option(help="Path to custom tasks directory.", rich_help_panel=HELP_PANEL_NAME_1) ] = None, + cache_dir: Annotated[ + str, Option(help="Cache directory for datasets and models.", rich_help_panel=HELP_PANEL_NAME_1) + ] = CACHE_DIR, num_fewshot_seeds: Annotated[ int, Option(help="Number of seeds to use for few-shot evaluation.", rich_help_panel=HELP_PANEL_NAME_1) ] = 1, @@ -316,6 +437,9 @@ def litellm( max_samples: Annotated[ Optional[int], Option(help="Maximum number of samples to evaluate on.", rich_help_panel=HELP_PANEL_NAME_3) ] = None, + override_batch_size: Annotated[ + int, Option(help="Override batch size for evaluation.", rich_help_panel=HELP_PANEL_NAME_3) + ] = -1, job_id: Annotated[ int, Option(help="Optional job id for future refenrence.", rich_help_panel=HELP_PANEL_NAME_3) ] = 0, @@ -324,12 +448,11 @@ def litellm( Evaluate models using LiteLLM as backend. 
""" - import yaml - from lighteval.logging.evaluation_tracker import EvaluationTracker from lighteval.models.litellm_model import LiteLLMModelConfig - from lighteval.pipeline import ParallelismManager, Pipeline, PipelineParameters + from lighteval.pipeline import EnvConfig, ParallelismManager, Pipeline, PipelineParameters + env_config = EnvConfig(token=TOKEN, cache_dir=cache_dir) evaluation_tracker = EvaluationTracker( output_dir=output_dir, save_details=save_details, @@ -339,22 +462,22 @@ def litellm( hub_results_org=results_org, ) + # TODO (nathan): better handling of model_args parallelism_manager = ParallelismManager.NONE if model_args.endswith(".yaml"): - with open(model_args, "r") as f: - config = yaml.safe_load(f) - metric_options = config.get("metric_options", {}) model_config = LiteLLMModelConfig.from_path(model_args) else: - metric_options = None - model_config = LiteLLMModelConfig.from_args(model_args) + model_args_dict: dict = {k.split("=")[0]: k.split("=")[1] if "=" in k else True for k in model_args.split(",")} + model_config = LiteLLMModelConfig(**model_args_dict) pipeline_params = PipelineParameters( launcher_type=parallelism_manager, + env_config=env_config, job_id=job_id, dataset_loading_processes=dataset_loading_processes, custom_tasks_directory=custom_tasks, + override_batch_size=override_batch_size, num_fewshot_seeds=num_fewshot_seeds, max_samples=max_samples, use_chat_template=use_chat_template, @@ -366,7 +489,6 @@ def litellm( pipeline_parameters=pipeline_params, evaluation_tracker=evaluation_tracker, model_config=model_config, - metric_options=metric_options, ) pipeline.evaluate() @@ -438,8 +560,9 @@ def inference_providers( from lighteval.models.endpoints.inference_providers_model import ( InferenceProvidersModelConfig, ) - from lighteval.pipeline import ParallelismManager, Pipeline, PipelineParameters + from lighteval.pipeline import EnvConfig, ParallelismManager, Pipeline, PipelineParameters + env_config = EnvConfig(token=TOKEN, cache_dir=CACHE_DIR) evaluation_tracker = EvaluationTracker( output_dir=output_dir, save_details=save_details, @@ -460,9 +583,11 @@ def inference_providers( pipeline_params = PipelineParameters( launcher_type=parallelism_manager, + env_config=env_config, job_id=job_id, dataset_loading_processes=dataset_loading_processes, custom_tasks_directory=custom_tasks, + override_batch_size=None, num_fewshot_seeds=num_fewshot_seeds, max_samples=max_samples, use_chat_template=True, diff --git a/src/lighteval/main_sglang.py b/src/lighteval/main_sglang.py index 43b16ca6a..c819e1cb8 100644 --- a/src/lighteval/main_sglang.py +++ b/src/lighteval/main_sglang.py @@ -19,12 +19,16 @@ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
+import os from typing import Optional from typer import Argument, Option from typing_extensions import Annotated +TOKEN = os.getenv("HF_TOKEN") +CACHE_DIR: str = os.getenv("HF_HOME", "/scratch") + HELP_PANEL_NAME_1 = "Common Parameters" HELP_PANEL_NAME_2 = "Logging Parameters" HELP_PANEL_NAME_3 = "Debug Parameters" @@ -53,6 +57,9 @@ def sglang( custom_tasks: Annotated[ Optional[str], Option(help="Path to custom tasks directory.", rich_help_panel=HELP_PANEL_NAME_1) ] = None, + cache_dir: Annotated[ + str, Option(help="Cache directory for datasets and models.", rich_help_panel=HELP_PANEL_NAME_1) + ] = CACHE_DIR, num_fewshot_seeds: Annotated[ int, Option(help="Number of seeds to use for few-shot evaluation.", rich_help_panel=HELP_PANEL_NAME_1) ] = 1, @@ -78,13 +85,6 @@ def sglang( save_details: Annotated[ bool, Option(help="Save detailed, sample per sample, results.", rich_help_panel=HELP_PANEL_NAME_2) ] = False, - wandb: Annotated[ - bool, - Option( - help="Push results to wandb. This will only work if you have wandb installed and logged in. We use env variable to configure wandb. see here: https://docs.wandb.ai/guides/track/environment-variables/", - rich_help_panel=HELP_PANEL_NAME_2, - ), - ] = False, # === debug === max_samples: Annotated[ Optional[int], Option(help="Maximum number of samples to evaluate on.", rich_help_panel=HELP_PANEL_NAME_3) @@ -99,8 +99,13 @@ def sglang( import yaml from lighteval.logging.evaluation_tracker import EvaluationTracker + from lighteval.models.model_input import GenerationParameters from lighteval.models.sglang.sglang_model import SGLangModelConfig - from lighteval.pipeline import ParallelismManager, Pipeline, PipelineParameters + from lighteval.pipeline import EnvConfig, ParallelismManager, Pipeline, PipelineParameters + + TOKEN = os.getenv("HF_TOKEN") + + env_config = EnvConfig(token=TOKEN, cache_dir=cache_dir) evaluation_tracker = EvaluationTracker( output_dir=output_dir, @@ -109,14 +114,15 @@ def sglang( push_to_tensorboard=push_to_tensorboard, public=public_run, hub_results_org=results_org, - wandb=wandb, ) pipeline_params = PipelineParameters( launcher_type=ParallelismManager.SGLANG, + env_config=env_config, job_id=job_id, dataset_loading_processes=dataset_loading_processes, custom_tasks_directory=custom_tasks, + override_batch_size=-1, num_fewshot_seeds=num_fewshot_seeds, max_samples=max_samples, use_chat_template=use_chat_template, @@ -126,18 +132,20 @@ def sglang( if model_args.endswith(".yaml"): with open(model_args, "r") as f: - metric_options = yaml.safe_load(f).get("metric_options", {}) - model_config = SGLangModelConfig.from_path(model_args) + config = yaml.safe_load(f)["model"] + model_args = config["base_params"]["model_args"] + generation_parameters = GenerationParameters.from_dict(config) else: - metric_options = {} - model_config = SGLangModelConfig.from_args(model_args) + generation_parameters = GenerationParameters() + + model_args_dict: dict = {k.split("=")[0]: k.split("=")[1] if "=" in k else True for k in model_args.split(",")} + model_config = SGLangModelConfig(**model_args_dict, generation_parameters=generation_parameters) pipeline = Pipeline( tasks=tasks, pipeline_parameters=pipeline_params, evaluation_tracker=evaluation_tracker, model_config=model_config, - metric_options=metric_options, ) pipeline.evaluate() diff --git a/src/lighteval/main_vllm.py b/src/lighteval/main_vllm.py index 31b37b100..6f0ef659e 100644 --- a/src/lighteval/main_vllm.py +++ b/src/lighteval/main_vllm.py @@ -19,12 +19,17 @@ # LIABILITY, WHETHER IN AN ACTION 
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. +import os +import re from typing import Optional from typer import Argument, Option from typing_extensions import Annotated +TOKEN = os.getenv("HF_TOKEN") +CACHE_DIR: str = os.getenv("HF_HOME", "/scratch") + HELP_PANEL_NAME_1 = "Common Parameters" HELP_PANEL_NAME_2 = "Logging Parameters" HELP_PANEL_NAME_3 = "Debug Parameters" @@ -47,15 +52,15 @@ def vllm( system_prompt: Annotated[ Optional[str], Option(help="Use system prompt for evaluation.", rich_help_panel=HELP_PANEL_NAME_4) ] = None, - cot_prompt: Annotated[ - Optional[str], Option(help="Use chain of thought prompt for evaluation.", rich_help_panel=HELP_PANEL_NAME_4) - ] = None, dataset_loading_processes: Annotated[ int, Option(help="Number of processes to use for dataset loading.", rich_help_panel=HELP_PANEL_NAME_1) ] = 1, custom_tasks: Annotated[ Optional[str], Option(help="Path to custom tasks directory.", rich_help_panel=HELP_PANEL_NAME_1) ] = None, + cache_dir: Annotated[ + str, Option(help="Cache directory for datasets and models.", rich_help_panel=HELP_PANEL_NAME_1) + ] = CACHE_DIR, num_fewshot_seeds: Annotated[ int, Option(help="Number of seeds to use for few-shot evaluation.", rich_help_panel=HELP_PANEL_NAME_1) ] = 1, @@ -81,13 +86,6 @@ def vllm( save_details: Annotated[ bool, Option(help="Save detailed, sample per sample, results.", rich_help_panel=HELP_PANEL_NAME_2) ] = False, - wandb: Annotated[ - bool, - Option( - help="Push results to wandb. This will only work if you have wandb installed and logged in. We use env variable to configure wandb. see here: https://docs.wandb.ai/guides/track/environment-variables/", - rich_help_panel=HELP_PANEL_NAME_2, - ), - ] = False, # === debug === max_samples: Annotated[ Optional[int], Option(help="Maximum number of samples to evaluate on.", rich_help_panel=HELP_PANEL_NAME_3) @@ -102,8 +100,13 @@ def vllm( import yaml from lighteval.logging.evaluation_tracker import EvaluationTracker + from lighteval.models.model_input import GenerationParameters from lighteval.models.vllm.vllm_model import VLLMModelConfig - from lighteval.pipeline import ParallelismManager, Pipeline, PipelineParameters + from lighteval.pipeline import EnvConfig, ParallelismManager, Pipeline, PipelineParameters + + TOKEN = os.getenv("HF_TOKEN") + + env_config = EnvConfig(token=TOKEN, cache_dir=cache_dir) evaluation_tracker = EvaluationTracker( output_dir=output_dir, @@ -112,29 +115,36 @@ def vllm( push_to_tensorboard=push_to_tensorboard, public=public_run, hub_results_org=results_org, - wandb=wandb, ) pipeline_params = PipelineParameters( launcher_type=ParallelismManager.VLLM, + env_config=env_config, job_id=job_id, dataset_loading_processes=dataset_loading_processes, custom_tasks_directory=custom_tasks, + override_batch_size=-1, # Cannot override batch size when using vLLM; Configure `max_num_seqs` and `max_num_batched_tokens` in `VLLMModelConfig` instead. 
num_fewshot_seeds=num_fewshot_seeds, max_samples=max_samples, use_chat_template=use_chat_template, system_prompt=system_prompt, - cot_prompt=cot_prompt, load_responses_from_details_date_id=load_responses_from_details_date_id, ) if model_args.endswith(".yaml"): with open(model_args, "r") as f: - metric_options = yaml.safe_load(f).get("metric_options", {}) - model_config = VLLMModelConfig.from_path(model_args) + config = yaml.safe_load(f)["model"] + model_args = config["base_params"]["model_args"] + metric_options = config.get("metric_options", {}) + generation_parameters = GenerationParameters.from_dict(config) else: + generation_parameters = GenerationParameters.from_model_args(model_args) + # We slice out generation_parameters from model_args to avoid double-counting in the VLLMModelConfig + model_args = re.sub(r"generation_parameters=\{.*?\},?", "", model_args).strip(",") metric_options = {} - model_config = VLLMModelConfig.from_args(model_args) + + model_args_dict: dict = {k.split("=")[0]: k.split("=")[1] if "=" in k else True for k in model_args.split(",")} + model_config = VLLMModelConfig(**model_args_dict, generation_parameters=generation_parameters) pipeline = Pipeline( tasks=tasks, diff --git a/src/lighteval/metrics/llm_as_judge.py b/src/lighteval/metrics/llm_as_judge.py index d383d61f9..99b55d403 100644 --- a/src/lighteval/metrics/llm_as_judge.py +++ b/src/lighteval/metrics/llm_as_judge.py @@ -21,17 +21,13 @@ # SOFTWARE. -import asyncio import logging import time from concurrent.futures import ThreadPoolExecutor -from typing import Callable, Literal, Optional +from typing import Callable, Literal -from huggingface_hub import AsyncInferenceClient, InferenceTimeoutError from pydantic import BaseModel -from requests.exceptions import HTTPError from tqdm import tqdm -from tqdm.asyncio import tqdm_asyncio from lighteval.utils.imports import is_litellm_available, is_openai_available, is_vllm_available from lighteval.utils.utils import as_list @@ -82,28 +78,10 @@ def __init__( model: str, templates: Callable, process_judge_response: Callable, - judge_backend: Literal["litellm", "openai", "transformers", "tgi", "vllm", "inference-providers"], + judge_backend: Literal["litellm", "openai", "transformers", "tgi", "vllm"], url: str | None = None, api_key: str | None = None, - max_tokens: int = 512, response_format: BaseModel = None, - hf_provider: Optional[ - Literal[ - "black-forest-labs", - "cerebras", - "cohere", - "fal-ai", - "fireworks-ai", - "inference-providers", - "hyperbolic", - "nebius", - "novita", - "openai", - "replicate", - "sambanova", - "together", - ] - ] = None, ): self.model = model self.template = templates @@ -118,47 +96,33 @@ def __init__( self.url = url self.api_key = api_key self.backend = judge_backend - self.hf_provider = hf_provider - self.max_tokens = max_tokens self.response_format = response_format if not None else DEFAULT_FORMAT - # Validate that hf_provider is specified when using inference-providers backend - if self.backend == "inference-providers" and self.hf_provider is None: - raise ValueError("When using 'inference-providers' as backend, you must specify an 'hf_provider'") - def __lazy_load_client(self): match self.backend: - # Both "openai" and "tgi" backends use the OpenAI-compatible API - # They are handled separately to allow for backend-specific validation and setup - case "openai" | "tgi": - if not is_openai_available(): - raise RuntimeError("OpenAI backend is not available.") + # Wether we use openai or TGI models, we go through the openai API + # 
to route to the endpoint + case "openai" | "tgi" if is_openai_available(): if self.client is None: from openai import OpenAI - self.client = OpenAI( - api_key=self.api_key if self.url is None else None, base_url=self.url if self.url else None - ) + if self.url is None: + self.client = OpenAI(api_key=self.api_key) + else: + self.client = OpenAI(base_url=self.url, api_key=self.api_key) return self.__call_api_parallel - - case "litellm": - if not is_litellm_available(): - raise RuntimeError("litellm is not available.") + case "litellm" if is_litellm_available(): return self.__call_litellm - - case "vllm": - if not is_vllm_available(): - raise RuntimeError("vllm is not available.") + case "vllm" if is_vllm_available(): if self.pipe is None: from vllm import LLM, SamplingParams from vllm.transformers_utils.tokenizer import get_tokenizer - self.sampling_params = SamplingParams(temperature=0.8, top_p=0.95, max_tokens=self.max_tokens) + self.sampling_params = SamplingParams(temperature=0.8, top_p=0.95, max_tokens=512) self.tokenizer = get_tokenizer(self.model, tokenizer_mode="auto") self.pipe = LLM(model=self.model, max_model_len=2048, gpu_memory_utilization=0.5, dtype="float16") return self.__call_vllm - case "transformers": if self.pipe is None: import torch @@ -172,18 +136,11 @@ def __lazy_load_client(self): "text-generation", model=transformers_model, tokenizer=tokenizer, - max_new_tokens=self.max_tokens, + max_new_tokens=256, ) return self.__call_transformers - - case "inference-providers": - from huggingface_hub import AsyncInferenceClient - - self.client = AsyncInferenceClient(token=self.api_key, base_url=self.url, provider=self.hf_provider) - return self.__call_hf_inference_async - case _: - raise ValueError(f"Unsupported backend: {self.backend}") + return lambda x: x def dict_of_lists_to_list_of_dicts(self, dict_of_lists): """ @@ -330,44 +287,6 @@ def __call_api(prompt): return results - def __call_hf_inference_async(self, prompts): - async def run_all() -> list[str]: - """Wrap inference call into function""" - tasks = (self.__call_hf_inference(prompt) for prompt in prompts) - return await tqdm_asyncio.gather(*tasks, desc="HF inference", total=len(prompts)) - - try: - loop = asyncio.get_running_loop() - logger.debug("Exting event loop is found, using loop.create_task") - result = loop.run_until_complete(run_all()) - except RuntimeError: - logger.debug("No running event loop found, using asyncio.run") - result = asyncio.run(run_all()) - - if None in result: - logger.warning("None found in inference results") - - return result - - async def __call_hf_inference(self, prompt): - self.client: AsyncInferenceClient - for _ in range(self.API_MAX_RETRY): - try: - response = await self.client.chat_completion( - model=self.model, - messages=prompt, - max_tokens=self.max_tokens, - ) - return response.choices[0].message.content - except (InferenceTimeoutError, HTTPError) as e: - logger.warning(f"HTTP error during HF inference: {e}") - await asyncio.sleep(self.API_RETRY_SLEEP) - except Exception as e: - logger.warning(f"Unexpected error during HF inference: {e}") - await asyncio.sleep(self.API_RETRY_SLEEP) - - raise Exception("Failed to get response from the HF API") - def __call_api_parallel(self, prompts): results = [] with ThreadPoolExecutor(10) as executor: diff --git a/src/lighteval/metrics/metrics.py b/src/lighteval/metrics/metrics.py index 4465099aa..748b39022 100644 --- a/src/lighteval/metrics/metrics.py +++ b/src/lighteval/metrics/metrics.py @@ -20,6 +20,7 @@ # OUT OF OR IN CONNECTION WITH THE 
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. + import numpy as np from aenum import Enum @@ -27,9 +28,6 @@ ExprExtractionConfig, IndicesExtractionConfig, LatexExtractionConfig, - compare_gold_target, - extract_target_from_pred, - get_extraction_regexes, multilingual_extractive_match_metric, ) from lighteval.metrics.harness_compatibility.drop import drop_metrics @@ -50,8 +48,6 @@ Extractiveness, F1_score, Faithfulness, - GPassAtK, - JudgeLLMSimpleQA, LoglikelihoodAcc, MajAtK, PassAtK, @@ -370,167 +366,6 @@ class Metrics(Enum): corpus_level_fn=np.mean, higher_is_better=True, ) - math_pass_at_1_4n = SampleLevelMetric( - metric_name="math_pass@1:4_samples", - sample_level_fn=PassAtK( - k=1, - n=4, - strip_strings=True, - # Extracting mathematical expressions and latex expressions - normalize_gold=lambda k: extract_target_from_pred( - k, - get_extraction_regexes( - formatted_doc=None, - target_types=[ExprExtractionConfig(), LatexExtractionConfig()], - language=Language.ENGLISH, - ), - ), - # Extracting mathematical expressions and latex expressions - normalize_pred=lambda k: extract_target_from_pred( - k, - get_extraction_regexes( - formatted_doc=None, - target_types=[ExprExtractionConfig(), LatexExtractionConfig()], - language=Language.ENGLISH, - ), - ), - # Uses sympy for comparision - sample_scoring_function=compare_gold_target, - ).compute, - category=MetricCategory.GENERATIVE_SAMPLING, - use_case=MetricUseCase.REASONING, - corpus_level_fn=np.mean, - higher_is_better=True, - ) - math_pass_at_1_8n = SampleLevelMetric( - metric_name="math_pass@1:8_samples", - sample_level_fn=PassAtK( - k=1, - n=8, - strip_strings=True, - # Extracting mathematical expressions and latex expressions - normalize_gold=lambda k: extract_target_from_pred( - k, - get_extraction_regexes( - formatted_doc=None, - target_types=[ExprExtractionConfig(), LatexExtractionConfig()], - language=Language.ENGLISH, - ), - ), - # Extracting mathematical expressions and latex expressions - normalize_pred=lambda k: extract_target_from_pred( - k, - get_extraction_regexes( - formatted_doc=None, - target_types=[ExprExtractionConfig(), LatexExtractionConfig()], - language=Language.ENGLISH, - ), - ), - # Uses sympy for comparision - sample_scoring_function=compare_gold_target, - ).compute, - category=MetricCategory.GENERATIVE_SAMPLING, - use_case=MetricUseCase.REASONING, - corpus_level_fn=np.mean, - higher_is_better=True, - ) - math_pass_at_1_16n = SampleLevelMetric( - metric_name="math_pass@1:16_samples", - sample_level_fn=PassAtK( - k=1, - n=16, - strip_strings=True, - # Extracting mathematical expressions and latex expressions - normalize_gold=lambda k: extract_target_from_pred( - k, - get_extraction_regexes( - formatted_doc=None, - target_types=[ExprExtractionConfig(), LatexExtractionConfig()], - language=Language.ENGLISH, - ), - ), - # Extracting mathematical expressions and latex expressions - normalize_pred=lambda k: extract_target_from_pred( - k, - get_extraction_regexes( - formatted_doc=None, - target_types=[ExprExtractionConfig(), LatexExtractionConfig()], - language=Language.ENGLISH, - ), - ), - # Uses sympy for comparision - sample_scoring_function=compare_gold_target, - ).compute, - category=MetricCategory.GENERATIVE_SAMPLING, - use_case=MetricUseCase.REASONING, - corpus_level_fn=np.mean, - higher_is_better=True, - ) - math_pass_at_1_32n = SampleLevelMetric( - metric_name="math_pass@1:32_samples", - sample_level_fn=PassAtK( - k=1, - n=32, - strip_strings=True, - # Extracting mathematical expressions and latex 
expressions - normalize_gold=lambda k: extract_target_from_pred( - k, - get_extraction_regexes( - formatted_doc=None, - target_types=[ExprExtractionConfig(), LatexExtractionConfig()], - language=Language.ENGLISH, - ), - ), - # Extracting mathematical expressions and latex expressions - normalize_pred=lambda k: extract_target_from_pred( - k, - get_extraction_regexes( - formatted_doc=None, - target_types=[ExprExtractionConfig(), LatexExtractionConfig()], - language=Language.ENGLISH, - ), - ), - # Uses sympy for comparision - sample_scoring_function=compare_gold_target, - ).compute, - category=MetricCategory.GENERATIVE_SAMPLING, - use_case=MetricUseCase.REASONING, - corpus_level_fn=np.mean, - higher_is_better=True, - ) - math_pass_at_1_64n = SampleLevelMetric( - metric_name="math_pass@1:64_samples", - sample_level_fn=PassAtK( - k=1, - n=64, - strip_strings=True, - # Extracting mathematical expressions and latex expressions - normalize_gold=lambda k: extract_target_from_pred( - k, - get_extraction_regexes( - formatted_doc=None, - target_types=[ExprExtractionConfig(), LatexExtractionConfig()], - language=Language.ENGLISH, - ), - ), - # Extracting mathematical expressions and latex expressions - normalize_pred=lambda k: extract_target_from_pred( - k, - get_extraction_regexes( - formatted_doc=None, - target_types=[ExprExtractionConfig(), LatexExtractionConfig()], - language=Language.ENGLISH, - ), - ), - # Uses sympy for comparision - sample_scoring_function=compare_gold_target, - ).compute, - category=MetricCategory.GENERATIVE_SAMPLING, - use_case=MetricUseCase.REASONING, - corpus_level_fn=np.mean, - higher_is_better=True, - ) - mrr = SampleLevelMetric( metric_name="mrr", sample_level_fn=MRR().compute, @@ -579,64 +414,6 @@ class Metrics(Enum): corpus_level_fn=np.mean, higher_is_better=True, ) - g_pass_at_16 = SampleLevelMetricGrouping( - metric_name="G-Pass@16:48_samples", - sample_level_fn=GPassAtK(k=16, n=48, strip_strings=True).compute, - category=MetricCategory.GENERATIVE_SAMPLING, - use_case=MetricUseCase.REASONING, - corpus_level_fn={metric: np.mean for metric in GPassAtK(k=16, n=48, strip_strings=True).all_metrics}, - higher_is_better={metric: True for metric in GPassAtK(k=16, n=48, strip_strings=True).all_metrics}, - ) - g_pass_at_8_16 = SampleLevelMetricGrouping( - metric_name="G-Pass@8-16:48_samples", - sample_level_fn=GPassAtK(k=[8, 16], n=48, strip_strings=True).compute, - category=MetricCategory.GENERATIVE_SAMPLING, - use_case=MetricUseCase.REASONING, - corpus_level_fn={metric: np.mean for metric in GPassAtK(k=16, n=48, strip_strings=True).all_metrics}, - higher_is_better={metric: True for metric in GPassAtK(k=16, n=48, strip_strings=True).all_metrics}, - ) - g_pass_at_16_expr_gold = SampleLevelMetricGrouping( - metric_name="G-Pass@16:48_samples", - sample_level_fn=GPassAtK( - k=16, - n=48, - strip_strings=True, - sample_scoring_function=lambda pred, ref, doc: multilingual_extractive_match_metric( - language=Language.ENGLISH, - fallback_mode="first_match", - precision=5, - gold_extraction_target=(ExprExtractionConfig(),), - # Match boxed first before trying other regexes - pred_extraction_target=(ExprExtractionConfig(), LatexExtractionConfig(boxed_match_priority=0)), - aggregation_function=max, - ).sample_level_fn([ref], [pred], doc), - ).compute, - category=MetricCategory.GENERATIVE_SAMPLING, - use_case=MetricUseCase.REASONING, - corpus_level_fn={metric: np.mean for metric in GPassAtK(k=16, n=48, strip_strings=True).all_metrics}, - higher_is_better={metric: True for metric in 
GPassAtK(k=16, n=48, strip_strings=True).all_metrics}, - ) - g_pass_at_16_latex_gold = SampleLevelMetricGrouping( - metric_name="G-Pass@16:48_samples", - sample_level_fn=GPassAtK( - k=16, - n=48, - strip_strings=True, - sample_scoring_function=lambda pred, ref, doc: multilingual_extractive_match_metric( - language=Language.ENGLISH, - fallback_mode="first_match", - precision=5, - gold_extraction_target=(LatexExtractionConfig(),), - # Match boxed first before trying other regexes - pred_extraction_target=(ExprExtractionConfig(), LatexExtractionConfig(boxed_match_priority=0)), - aggregation_function=max, - ).sample_level_fn([ref], [pred], doc), - ).compute, - category=MetricCategory.GENERATIVE_SAMPLING, - use_case=MetricUseCase.REASONING, - corpus_level_fn={metric: np.mean for metric in GPassAtK(k=16, n=48, strip_strings=True).all_metrics}, - higher_is_better={metric: True for metric in GPassAtK(k=16, n=48, strip_strings=True).all_metrics}, - ) perfect_exact_match = SampleLevelMetric( metric_name="perfect_em", sample_level_fn=ExactMatches().compute, @@ -790,16 +567,6 @@ class Metrics(Enum): corpus_level_fn=np.mean, higher_is_better=True, ) - simpleqa_judge = SampleLevelMetricGrouping( - metric_name=["simpleqa_judge"], - higher_is_better={"simpleqa_judge": True}, - category=MetricCategory.LLM_AS_JUDGE, - use_case=MetricUseCase.SUMMARIZATION, - sample_level_fn=JudgeLLMSimpleQA().compute, - corpus_level_fn={ - "simpleqa_judge": np.mean, - }, - ) target_perplexity = SampleLevelMetric( metric_name="ppl", sample_level_fn=PerplexityPreparator(units_type="words").prepare, diff --git a/src/lighteval/metrics/metrics_sample.py b/src/lighteval/metrics/metrics_sample.py index 76598c1bb..821320c6c 100644 --- a/src/lighteval/metrics/metrics_sample.py +++ b/src/lighteval/metrics/metrics_sample.py @@ -36,7 +36,6 @@ from nltk.tokenize.treebank import TreebankWordTokenizer from nltk.translate.bleu_score import sentence_bleu from pydantic import BaseModel -from scipy.stats import hypergeom from transformers import AutoModelForSequenceClassification, AutoTokenizer from lighteval.metrics.imports.bert_scorer import BERTScorer @@ -50,7 +49,6 @@ remove_braces, remove_braces_and_strip, ) -from lighteval.metrics.utils.judge_utils import get_judge_prompt_simpleqa, process_judge_response_simpleqa from lighteval.tasks.requests import Doc from lighteval.utils.utils import as_list, safe_divide @@ -874,44 +872,30 @@ def __init__( judge_model_name: str, template: Callable, process_judge_response: Callable, - judge_backend: Literal["litellm", "openai", "transformers", "vllm", "tgi", "inference-providers"], + judge_backend: Literal["litellm", "openai", "transformers", "vllm", "tgi"], short_judge_name: str | None = None, response_format: BaseModel = None, - url: str | None = None, - hf_provider: str | None = None, - max_tokens: int | None = None, ) -> None: - logger.debug(f"Initializing JudgeLLM with backend: {judge_backend}, model: {judge_model_name}") - - api_key = None - match judge_backend: case "openai": if judge_model_name not in self.available_models_openai: raise ValueError(f"{judge_model_name} not in available models for llm as a judge metric") - api_key = os.getenv("OPENAI_API_KEY") - logger.debug("Using OpenAI backend for llm as a judge metric") - + else: + api_key = os.getenv("OPENAI_API_KEY") + url = None case "tgi": api_key = os.getenv("HF_TOKEN") - if url is None: - url = "https://api-inference.huggingface.co/v1/" - logger.debug("Using TGI backend") - - case "inference-providers": - api_key = os.getenv("HF_TOKEN") 
- logger.debug("Using Hugging Face Inference backend") - + url = "https://api-inference.huggingface.co/v1/" case "litellm": - logger.debug("Using LiteLLM backend for llm as a judge metric") - + api_key = None + url = None case "transformers" | "vllm": - logger.debug("Checking availability of Transformers or VLLM model") api = HfApi() models = api.list_models(model_name=judge_model_name) + url = None + api_key = None if not models: - raise ValueError(f"{judge_model_name} not found on Hugging Face Hub") - + raise ValueError(f"{judge_model_name} not in available models for llm as a judge metric") case _: raise ValueError(f"{judge_backend} is not a valid backend for llm as a judge metric") @@ -920,55 +904,16 @@ def __init__( model=judge_model_name, templates=template, process_judge_response=process_judge_response, - judge_backend=judge_backend, - response_format=response_format, api_key=api_key, url=url, - hf_provider=hf_provider, - max_tokens=max_tokens, + judge_backend=judge_backend, + response_format=response_format, ) def compute(self, predictions: list[str], formatted_doc: Doc, **kwargs) -> dict[str, float]: raise NotImplementedError("This method should be implemented in the subclass.") -class JudgeLLMSimpleQA(JudgeLLM): - def __init__(self): - super().__init__( - judge_model_name="gpt-4o-2024-08-06", - template=get_judge_prompt_simpleqa, - process_judge_response=process_judge_response_simpleqa, - judge_backend="openai", - short_judge_name="gpt4o", - ) - - def compute(self, sample_ids: list[str], responses: list, formatted_docs: list[Doc], **kwargs) -> dict[str, float]: - """ - Compute the score of a generative task using a llm as a judge. - The generative task can be multiturn with 2 turns max, in that case, we - return scores for turn 1 and 2. Also returns user_prompt and judgement - which are ignored later by the aggregator. - """ - questions = [formatted_doc.query for formatted_doc in formatted_docs] - options = [formatted_doc.choices for formatted_doc in formatted_docs] - golds = [formatted_doc.get_golds()[0] for formatted_doc in formatted_docs] - predictions = [response[0].result[0] for response in responses] - - scores, messages, judgements = self.judge.evaluate_answer_batch(questions, predictions, options, golds) - - metrics = [] - for i in range(len(sample_ids)): - metrics.append( - { - "simpleqa_judge": scores[i], - f"user_prompt_{self.short_judge_name}": messages[i], - f"judgement_{self.short_judge_name}": judgements[i], - } - ) - - return metrics - - class JudgeLLMMTBench(JudgeLLM): def compute(self, predictions: list[str], formatted_doc: Doc, **kwargs): """ @@ -1228,164 +1173,3 @@ def pass_at_k(self, all_scores: list[int]) -> float: return 1.0 return 1.0 - np.prod(1.0 - self.k / np.arange(self.n - c + 1, self.n + 1)) - - -class GPassAtK: - def __init__( - self, - k: Union[int, list[int]], - n: int = None, - thresholds: list[float] = [0.0, 0.25, 0.5, 0.75, 1.0], - normalize_gold: Callable = None, - normalize_pred: Callable = None, - strip_strings: bool = False, - sample_scoring_function: Union[Callable[[str, str], float], str] = None, - ): - """Computing G-Pass@k from http://arxiv.org/abs/2412.13147 - - Args: - k (int, list): The number of successful attempts to be considered. - n (int): Number of samples to generate. - thresholds (list): Thresholds to control successful attempts in k generate. - normalize_gold (callable, optional): Function to use to normalize the reference strings. - Defaults to None if no normalization is applied. 
- normalize_pred (callable, optional): Function to use to normalize the predicted strings. - Defaults to None if no normalization is applied. - strip_strings (bool, optional): Whether to strip both reference and predictions. Defaults to False. - sample_scoring_function (callable or str, optional): Function to use to score each sample. - Either pass the full function (should take a string prediction and a string gold, and return a score between 0 and 1) - a string (any of `prefix`, `suffix` or `full`) to define the type of exact match that you want, or nothing to defaults to "full". - `prefix` checks if the prediction starts with the gold, - `suffix` if the prediction ends with the gold, - `full` if the prediction and gold are equal - """ - self.k = as_list(k) - self.n = n - self.thresholds = thresholds - self.normalize_gold = normalize_gold - self.normalize_pred = normalize_pred - self.strip_strings = strip_strings - - # Managed the logic of the per prediction of sample scoring - if callable(sample_scoring_function): - self.score_sample = sample_scoring_function - self.type_exact_match = None - else: - if isinstance(sample_scoring_function, str): - if sample_scoring_function not in ["prefix", "suffix", "full"]: - raise ValueError( - f"type_exact_match (used in parametrized_exact_match) must be one of prefix, suffix, or full. Was {sample_scoring_function} instead." - ) - self.type_exact_match = sample_scoring_function - else: - self.type_exact_match = "full" - self.score_sample = self.default_sample_scoring - - def compute(self, predictions: list[str], formatted_doc: list[Doc], **kwargs) -> dict[str, float]: - """Computes the metric over a list of golds and predictions for one single item with possibly many samples. - It applies normalisation (if needed) to model prediction and gold, computes their per prediction score, - then aggregates the scores over the samples using a pass@k. - - Args: - golds (list[str]): Reference targets - predictions (list[str]): k predicted strings - - Returns: - float: Aggregated score over the current sample's items. - """ - golds = formatted_doc.get_golds() - - if len(golds) > 1: - raise Exception("Cannot compute G-Pass@k with several golds") - - if self.n is None: - self.n = len(predictions) - logger.warning( - "n undefined in the G-Pass@k. We assume it's the same as the sample's number of predictions." 
- ) - elif len(predictions) < self.n: - logger.warning(f"Number of predictions is less than {self.n} for G-Pass@k.") - - gold = self.get_processed_gold(golds[0]) - - all_scores = [] - for pred in predictions[: self.n]: - cur_pred = self.get_processed_pred(pred=pred) - all_scores.append(self.score_sample(cur_pred, gold, formatted_doc)) - - return self.g_pass_at_k(all_scores) - - def get_processed_gold(self, gold: str) -> str: - if self.strip_strings: - gold = gold.strip() - - if self.normalize_gold: - gold = self.normalize_gold(gold) - - return gold - - def get_processed_pred(self, pred: str) -> str: - if not pred: - return "" - - if self.strip_strings: - pred = pred.strip() - - if self.normalize_pred: - pred = self.normalize_pred(pred) - - return pred - - def default_sample_scoring(self, pred: str, gold: str) -> int: - if self.type_exact_match == "prefix": - return 1 if pred.startswith(gold) else 0 - if self.type_exact_match == "suffix": - return 1 if pred.endswith(gold) else 0 - return 1 if gold == pred else 0 - - def g_pass_at_k(self, all_scores: list[int]) -> float: - """Computation of G-Pass@k details from http://arxiv.org/abs/2412.13147""" - c: int = sum(all_scores) - n: int = self.n - ks: int = self.k - thresholds: list[float] = self.thresholds - - def _compute_g_pass_at_k(n, c, k, m): - if m > min(c, k) or k > n or c < 0 or n <= 0 or m < 0: - return 0.0 - return hypergeom.sf(m - 1, n, c, k) - - def compute_g_pass_at_k(n, c, k, t): - m = max(int(np.ceil(k * t)), 1) - return _compute_g_pass_at_k(n, c, k, m) - - def compute_mg_pass_at_k(n, c, k): - low, high = int(np.ceil(k * 0.5)), k - - mg_pass_at_k = 0.0 - for i in range(low + 1, high + 1): - mg_pass_at_k += _compute_g_pass_at_k(n, c, k, i) - mg_pass_at_k = 2 * mg_pass_at_k / k - - return mg_pass_at_k - - metrics = {} - for k in ks: - for t in thresholds: - metrics[f"G-Pass@{k}_{t}"] = compute_g_pass_at_k(n, c, k, t) - metrics[f"mG-Pass@{k}"] = compute_mg_pass_at_k(n, c, k) - - return metrics - - @property - def all_metrics(self): - ks: int = self.k - thresholds: list[float] = self.thresholds - - metrics = [] - for k in ks: - for t in thresholds: - metrics.append(f"G-Pass@{k}_{t}") - metrics.append(f"mG-Pass@{k}") - - return metrics diff --git a/src/lighteval/metrics/utils/judge_utils.py b/src/lighteval/metrics/utils/judge_utils.py deleted file mode 100644 index cde25fd26..000000000 --- a/src/lighteval/metrics/utils/judge_utils.py +++ /dev/null @@ -1,127 +0,0 @@ -# MIT License - -# Copyright (c) 2024 The HuggingFace Team - -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all -# copies or substantial portions of the Software. - -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. -import logging - - -logger = logging.getLogger(__name__) - - -def get_judge_prompt_simpleqa(question: str, answer: str, gold: str, **kwargs): - GRADER_TEMPLATE = """ -Your job is to look at a question, a gold target, and a predicted answer, and then assign a grade of either ["CORRECT", "INCORRECT", "NOT_ATTEMPTED"]. -First, I will give examples of each grade, and then you will grade a new example. - - -The following are examples of CORRECT predicted answers. -``` -Question: What are the names of Barack Obama's children? -Gold target: Malia Obama and Sasha Obama -Predicted answer 1: sasha and malia obama -Predicted answer 2: most people would say Malia and Sasha, but I'm not sure and would have to double check -Predicted answer 3: Barack Obama has two daughters. Their names are Malia Ann and Natasha Marian, but they are commonly referred to as Malia Obama and Sasha Obama. Malia was born on July 4, 1998, and Sasha was born on June 10, 2001. -``` -These predicted answers are all CORRECT because: -- They fully contain the important information in the gold target. -- They do not contain any information that contradicts the gold target. -- Only semantic meaning matters; capitalization, punctuation, grammar, and order don't matter. -- Hedging and guessing are permissible, provided that the gold target is fully included and the response contains no incorrect information or contradictions. - - -The following are examples of INCORRECT predicted answers. -``` -Question: What are the names of Barack Obama's children? -Gold target: Malia and Sasha -Predicted answer 1: Malia. -Predicted answer 2: Malia, Sasha, and Susan. -Predicted answer 3: Barack Obama does not have any children. -Predicted answer 4: I think it's either Malia and Sasha. Or it could be Malia and Jackie. Or it could be Joey and Malia. -Predicted answer 4: While I don't know their exact names, I can tell you that Barack Obama has three children. -Predicted answer 5: It's possible you may mean Betsy and Olivia. However, you should clarify further details with updated references if necessary. Is that the correct answer? -Predicted answer 6: It may be the case that Obama's child is named James. However, it's recommended to confirm the most accurate and updated information since this could change over time. This model may not always reflect the most current information. -``` -These predicted answers are all INCORRECT because: -- A factual statement in the answer contradicts the gold target. Incorrect statements that have some hedging (e.g., "it is possible that", "although i'm not sure, i think") are also considered incorrect. - - -The following are examples of NOT_ATTEMPTED predicted answers. -``` -Question: What are the names of Barack Obama's children? -Gold target: Malia and Sasha -Predicted answer 1: I don't know. -Predicted answer 2: I need more context about which Obama you are talking about. -Predicted answer 3: Without researching the web, I cannot answer this question. However, I can tell you that Barack Obama has two children. -Predicted answer 4: Barack Obama has two children. I know that one of them is Malia, but I'm not sure about the other one. 
-``` -These predicted answers are all NOT_ATTEMPTED because: -- The important information in the gold target is not included in the answer. -- No statements in the answer contradict the gold target. - - -Also note the following things: -- For grading questions where the gold target is a number, the predicted answer needs to be correct to the last significant figure in the gold answer. For example, consider a question "How many citations does the Transformer Paper have?" with gold target "120k". -- Predicted answers "120k", "124k", and 115k" are all CORRECT. -- Predicted answers "100k" and "113k" are INCORRECT. -- Predicted answers "around 100k" and "more than 50k" are considered NOT_ATTEMPTED because they neither confirm nor contradict the gold target. -- The gold target may contain more information than the question. In such cases, the predicted answer only needs to contain the information that is in the question. -- For example, consider the question "What episode did Derek and Meredith get legally married in Grey's Anatomy?" with gold target "Season 7, Episode 20: White Wedding". Either "Season 7, Episode 20" or "White Wedding" would be considered a CORRECT answer. -- Do not punish predicted answers if they omit information that would be clearly inferred from the question. -- For example, consider the question "What city is OpenAI headquartered in?" and the gold target "San Francisco, California". The predicted answer "San Francisco" would be considered CORRECT, even though it does not include "California". -- Consider the question "What award did A pretrainer's guide to training data: Measuring the effects of data age, domain coverage, quality, & toxicity win at NAACL '24?", the gold target is "Outstanding Paper Award". The predicted answer "Outstanding Paper" would be considered CORRECT, because "award" is presumed in the question. -- For the question "What is the height of Jason Wei in meters?", the gold target is "1.73 m". The predicted answer "1.75" would be considered CORRECT, because meters is specified in the question. -- For the question "What is the name of Barack Obama's wife?", the gold target is "Michelle Obama". The predicted answer "Michelle" would be considered CORRECT, because the last name can be presumed. -- Do not punish for typos in people's name if it's clearly the same name. -- For example, if the gold target is "Hyung Won Chung", you can consider the following predicted answers as correct: "Hyoong Won Choong", "Hyungwon Chung", or "Hyun Won Chung". - - -Here is a new example. Simply reply with either CORRECT, INCORRECT, NOT ATTEMPTED. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer. -``` -Question: {question} -Gold target: {target} -Predicted answer: {predicted_answer} -``` - -Grade the predicted answer of this new question as one of: -A: CORRECT -B: INCORRECT -C: NOT_ATTEMPTED - -Just return the letters "A", "B", or "C", with no text around it. 
-""".strip() - - content = GRADER_TEMPLATE.format( - question=question, - target=gold, - predicted_answer=answer, - ) - return [{"role": "user", "content": content}] - - -def process_judge_response_simpleqa(response: str) -> float: - if response == "A": - return 1.0 - elif response == "B": - return 0.0 - elif response == "C": - return 0.0 - else: - logger.warning(f"Unknown response from judge: {response}") - return 0.0 diff --git a/src/lighteval/models/dummy/dummy_model.py b/src/lighteval/models/dummy/dummy_model.py index 766d89bb9..ff89656be 100644 --- a/src/lighteval/models/dummy/dummy_model.py +++ b/src/lighteval/models/dummy/dummy_model.py @@ -23,9 +23,9 @@ # inspired by https://github.com/EleutherAI/lm-evaluation-harness/blob/main/lm_eval/models/dummy.py import random +from dataclasses import dataclass from typing import Optional -from pydantic import BaseModel from transformers import AutoTokenizer from lighteval.models.abstract_model import LightevalModel, ModelInfo @@ -36,9 +36,11 @@ LoglikelihoodRollingRequest, LoglikelihoodSingleTokenRequest, ) +from lighteval.utils.utils import EnvConfig -class DummyModelConfig(BaseModel, extra="forbid"): +@dataclass +class DummyModelConfig: seed: int = 42 @@ -48,8 +50,10 @@ class DummyModel(LightevalModel): def __init__( self, config: DummyModelConfig, + env_config: EnvConfig, ): self.config = config + self.env_config = env_config self._random = random.Random(self.config.seed) self._tokenizer = None self.model_info = ModelInfo(model_name="dummy", model_sha=str(config.seed)) diff --git a/src/lighteval/models/endpoints/endpoint_model.py b/src/lighteval/models/endpoints/endpoint_model.py index 44f0cebd4..37bb9754e 100644 --- a/src/lighteval/models/endpoints/endpoint_model.py +++ b/src/lighteval/models/endpoints/endpoint_model.py @@ -24,6 +24,7 @@ import logging import re import time +from dataclasses import dataclass, replace from typing import Coroutine, Dict, List, Optional, Union import requests @@ -34,6 +35,7 @@ InferenceEndpoint, InferenceEndpointError, InferenceEndpointTimeoutError, + TextGenerationInputGenerateParameters, TextGenerationInputGrammarType, TextGenerationOutput, create_inference_endpoint, @@ -47,15 +49,15 @@ from lighteval.data import GenerativeTaskDataset, LoglikelihoodDataset from lighteval.models.abstract_model import LightevalModel, ModelInfo +from lighteval.models.model_input import GenerationParameters from lighteval.models.model_output import GenerativeResponse, LoglikelihoodResponse, LoglikelihoodSingleTokenResponse -from lighteval.models.utils import ModelConfig from lighteval.tasks.requests import ( GreedyUntilRequest, LoglikelihoodRequest, LoglikelihoodRollingRequest, LoglikelihoodSingleTokenRequest, ) -from lighteval.utils.utils import as_list +from lighteval.utils.utils import EnvConfig, as_list logger = logging.getLogger(__name__) @@ -74,32 +76,46 @@ ] -class ServerlessEndpointModelConfig(ModelConfig): +@dataclass +class ServerlessEndpointModelConfig: model_name: str add_special_tokens: bool = True + generation_parameters: GenerationParameters = None + def __post_init__(self): + if not self.generation_parameters: + self.generation_parameters = GenerationParameters() -class InferenceEndpointModelConfig(ModelConfig): - endpoint_name: str | None = None - model_name: str | None = None + @classmethod + def from_path(cls, path: str) -> "ServerlessEndpointModelConfig": + import yaml + + with open(path, "r") as f: + config = yaml.safe_load(f)["model"] + return cls(**config["base_params"]) + + 
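Note: both endpoint configs in this hunk follow the same shape: a plain dataclass whose __post_init__ fills in a default GenerationParameters (and, for InferenceEndpointModelConfig just below, validates mutually exclusive fields), plus a from_path classmethod that reads the "model" section of a YAML file. A minimal self-contained sketch of that pattern; the names MyEndpointConfig and _GenerationDefaults and the YAML layout are illustrative assumptions, not part of this patch:

from dataclasses import dataclass
from typing import Optional

import yaml


@dataclass
class _GenerationDefaults:
    # stand-in for lighteval's GenerationParameters
    temperature: Optional[float] = None
    max_new_tokens: Optional[int] = None


@dataclass
class MyEndpointConfig:
    endpoint_name: Optional[str] = None
    model_name: Optional[str] = None
    generation_parameters: Optional[_GenerationDefaults] = None

    def __post_init__(self):
        # exactly one of endpoint_name / model_name must be set (xor), as in
        # InferenceEndpointModelConfig.__post_init__ below
        if not (self.endpoint_name is None) ^ (self.model_name is None):
            raise ValueError("Set either endpoint_name or model_name (but not both).")
        if self.generation_parameters is None:
            self.generation_parameters = _GenerationDefaults()

    @classmethod
    def from_path(cls, path: str) -> "MyEndpointConfig":
        # mirrors ServerlessEndpointModelConfig.from_path above: read the "model" section
        with open(path, "r") as f:
            config = yaml.safe_load(f)["model"]
        return cls(**config["base_params"])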
+@dataclass +class InferenceEndpointModelConfig: + endpoint_name: str = None + model_name: str = None reuse_existing: bool = False accelerator: str = "gpu" - dtype: str | None = None # if empty, we use the default + model_dtype: str = None # if empty, we use the default vendor: str = "aws" region: str = "us-east-1" # this region has the most hardware options available - instance_size: str | None = None # if none, we autoscale - instance_type: str | None = None # if none, we autoscale + instance_size: str = None # if none, we autoscale + instance_type: str = None # if none, we autoscale framework: str = "pytorch" endpoint_type: str = "protected" add_special_tokens: bool = True revision: str = "main" - namespace: str | None = ( - None # The namespace under which to launch the endpoint. Defaults to the current user's namespace - ) - image_url: str | None = None - env_vars: dict | None = None + namespace: str = None # The namespace under which to launch the endpoint. Defaults to the current user's namespace + image_url: str = None + env_vars: dict = None + generation_parameters: GenerationParameters = None - def model_post_init(self, __context): + def __post_init__(self): # xor operator, one is None but not the other if (self.instance_size is None) ^ (self.instance_type is None): raise ValueError( @@ -109,10 +125,30 @@ def model_post_init(self, __context): if not (self.endpoint_name is None) ^ int(self.model_name is None): raise ValueError("You need to set either endpoint_name or model_name (but not both).") + if not self.generation_parameters: + self.generation_parameters = GenerationParameters() + + @classmethod + def from_path(cls, path: str) -> "InferenceEndpointModelConfig": + """Load configuration for inference endpoint model from YAML file path. + + Args: + path (`str`): Path of the model configuration YAML file. + + Returns: + [`InferenceEndpointModelConfig`]: Configuration for inference endpoint model. 
+ """ + import yaml + + with open(path, "r") as f: + config = yaml.safe_load(f)["model"] + config["base_params"]["model_dtype"] = config["base_params"].pop("dtype", None) + return cls(**config["base_params"], **config.get("instance", {})) + def get_dtype_args(self) -> Dict[str, str]: - if self.dtype is None: + if self.model_dtype is None: return {} - model_dtype = self.dtype.lower() + model_dtype = self.model_dtype.lower() if model_dtype in ["awq", "eetq", "gptq"]: return {"QUANTIZE": model_dtype} if model_dtype == "8bit": @@ -133,7 +169,7 @@ class InferenceEndpointModel(LightevalModel): """ def __init__( # noqa: C901 - self, config: Union[InferenceEndpointModelConfig, ServerlessEndpointModelConfig] + self, config: Union[InferenceEndpointModelConfig, ServerlessEndpointModelConfig], env_config: EnvConfig ) -> None: self.reuse_existing = getattr(config, "reuse_existing", False) self._max_length = None @@ -186,6 +222,7 @@ def __init__( # noqa: C901 region=region, instance_size=instance_size, instance_type=instance_type, + token=env_config.token, custom_image={ "health_route": "/health", "env": { @@ -203,7 +240,9 @@ def __init__( # noqa: C901 ) else: # Endpoint exists logger.info("Reusing existing endpoint.") - self.endpoint = get_inference_endpoint(name=endpoint_name, namespace=config.namespace) + self.endpoint = get_inference_endpoint( + name=endpoint_name, token=env_config.token, namespace=config.namespace + ) else: # Endpoint exists locally but either failed (and most likely it must be scaled up) @@ -263,8 +302,8 @@ def __init__( # noqa: C901 self.endpoint_name = None self.name = config.model_name self.revision = "default" - self.async_client = AsyncInferenceClient(model=config.model_name) - self.client = InferenceClient(model=config.model_name) + self.async_client = AsyncInferenceClient(model=config.model_name, token=env_config.token) + self.client = InferenceClient(model=config.model_name, token=env_config.token) self.use_async = True # set to False for debug - async use is faster @@ -274,11 +313,11 @@ def __init__( # noqa: C901 self.model_info = ModelInfo( model_name=self.name, model_sha=self.revision, - model_dtype=getattr(config, "dtype", "default"), + model_dtype=getattr(config, "model_dtype", "default"), model_size=-1, ) self.generation_parameters = config.generation_parameters - self.generation_config = self.generation_parameters.to_tgi_ie_dict() + self.generation_config = TextGenerationInputGenerateParameters(**self.generation_parameters.to_tgi_ie_dict()) @staticmethod def get_larger_hardware_suggestion(cur_instance_type: str = None, cur_instance_size: str = None): @@ -362,13 +401,16 @@ def _async_process_request( ) -> Coroutine[None, list[TextGenerationOutput], str]: # Todo: add an option to launch with conversational instead for chat prompts # https://huggingface.co/docs/huggingface_hub/v0.20.3/en/package_reference/inference_client#huggingface_hub.AsyncInferenceClient.conversational - self.generation_config["grammar"] = grammar - self.generation_config["stop"] = stop_tokens - self.generation_config["max_new_tokens"] = max_tokens - self.generation_config["details"] = True - self.generation_config["decoder_input_details"] = True + generation_config: TextGenerationInputGenerateParameters = replace( + self.generation_config, + stop=stop_tokens, + max_new_tokens=max_tokens, + details=True, + decoder_input_details=True, + grammar=grammar, + ) - generated_text = self.async_client.text_generation(prompt=context, **self.generation_config) + generated_text = 
self.async_client.text_generation(prompt=context, generation_config=generation_config) return generated_text @@ -381,15 +423,18 @@ def _process_request( ) -> TextGenerationOutput: # Todo: add an option to launch with conversational instead for chat prompts # https://huggingface.co/docs/huggingface_hub/v0.20.3/en/package_reference/inference_client#huggingface_hub.AsyncInferenceClient.conversational - self.generation_config["stop"] = stop_tokens - self.generation_config["max_new_tokens"] = max_tokens - self.generation_config["details"] = True - self.generation_config["decoder_input_details"] = True - self.generation_config["grammar"] = grammar + generation_config: TextGenerationInputGenerateParameters = replace( + self.generation_config, + stop=stop_tokens, + max_new_tokens=max_tokens, + details=True, + decoder_input_details=True, + grammar=grammar, + ) generated_text = self.client.text_generation( prompt=context, - **self.generation_config, + generation_config=generation_config, ) return generated_text diff --git a/src/lighteval/models/endpoints/inference_providers_model.py b/src/lighteval/models/endpoints/inference_providers_model.py index 9d4bfc08d..deb78fea7 100644 --- a/src/lighteval/models/endpoints/inference_providers_model.py +++ b/src/lighteval/models/endpoints/inference_providers_model.py @@ -22,11 +22,12 @@ import asyncio import logging +from dataclasses import field from typing import Any, List, Optional import yaml from huggingface_hub import AsyncInferenceClient, ChatCompletionOutput -from pydantic import NonNegativeInt +from pydantic import BaseModel, NonNegativeInt from tqdm import tqdm from tqdm.asyncio import tqdm as async_tqdm from transformers import AutoTokenizer @@ -40,7 +41,6 @@ LoglikelihoodResponse, LoglikelihoodSingleTokenResponse, ) -from lighteval.models.utils import ModelConfig from lighteval.tasks.requests import ( GreedyUntilRequest, LoglikelihoodRequest, @@ -52,7 +52,7 @@ logger = logging.getLogger(__name__) -class InferenceProvidersModelConfig(ModelConfig): +class InferenceProvidersModelConfig(BaseModel): """Configuration for InferenceProvidersClient. 
Args: @@ -63,24 +63,25 @@ class InferenceProvidersModelConfig(ModelConfig): generation_parameters: Parameters for text generation """ - model_name: str + model: str provider: str timeout: int | None = None proxies: Any | None = None parallel_calls_count: NonNegativeInt = 10 + generation_parameters: GenerationParameters = field(default_factory=GenerationParameters) @classmethod def from_path(cls, path): with open(path, "r") as f: config = yaml.safe_load(f)["model"] - model_name = config["model_name"] + model = config["model_name"] provider = config.get("provider", None) timeout = config.get("timeout", None) proxies = config.get("proxies", None) generation_parameters = GenerationParameters.from_dict(config) return cls( - model=model_name, + model=model, provider=provider, timeout=timeout, proxies=proxies, @@ -102,12 +103,12 @@ def __init__(self, config: InferenceProvidersModelConfig) -> None: config: Configuration object containing model and provider settings """ self.model_info = ModelInfo( - model_name=config.model_name, + model_name=config.model, model_sha="", model_dtype=None, model_size="", ) - self.model_name = config.model_name + self.model = config.model self.provider = config.provider self.generation_parameters = config.generation_parameters @@ -122,7 +123,7 @@ def __init__(self, config: InferenceProvidersModelConfig) -> None: timeout=config.timeout, proxies=config.proxies, ) - self._tokenizer = AutoTokenizer.from_pretrained(self.model_name) + self._tokenizer = AutoTokenizer.from_pretrained(self.model) def _encode(self, text: str) -> dict: enc = self._tokenizer(text=text) @@ -148,7 +149,7 @@ async def __call_api(self, prompt: List[dict], num_samples: int) -> Optional[Cha for attempt in range(self.API_MAX_RETRY): try: kwargs = { - "model": self.model_name, + "model": self.model, "messages": prompt, "n": num_samples, } diff --git a/src/lighteval/models/endpoints/tgi_model.py b/src/lighteval/models/endpoints/tgi_model.py index 525f1259e..f0bb712b6 100644 --- a/src/lighteval/models/endpoints/tgi_model.py +++ b/src/lighteval/models/endpoints/tgi_model.py @@ -21,7 +21,7 @@ # SOFTWARE. import asyncio -from dataclasses import replace +from dataclasses import dataclass, replace from typing import Coroutine, Optional import requests @@ -29,7 +29,7 @@ from transformers import AutoTokenizer from lighteval.models.endpoints.endpoint_model import InferenceEndpointModel, ModelInfo -from lighteval.models.utils import ModelConfig +from lighteval.models.model_input import GenerationParameters from lighteval.utils.imports import NO_TGI_ERROR_MSG, is_tgi_available @@ -46,10 +46,32 @@ def divide_chunks(array, n): yield array[i : i + n] -class TGIModelConfig(ModelConfig): - inference_server_address: str | None - inference_server_auth: str | None - model_name: str | None +@dataclass +class TGIModelConfig: + inference_server_address: str + inference_server_auth: str + model_id: str + generation_parameters: GenerationParameters = None + + def __post_init__(self): + if not self.generation_parameters: + self.generation_parameters = GenerationParameters() + + @classmethod + def from_path(cls, path: str) -> "TGIModelConfig": + """Load configuration for TGI endpoint model from YAML file path. + + Args: + path (`str`): Path of the model configuration YAML file. + + Returns: + [`TGIModelConfig`]: Configuration for TGI endpoint model. 
+ """ + import yaml + + with open(path, "r") as f: + config = yaml.safe_load(f)["model"] + return cls(**config["instance"], generation_parameters=GenerationParameters.from_dict(config)) # inherit from InferenceEndpointModel instead of LightevalModel since they both use the same interface, and only overwrite diff --git a/src/lighteval/models/litellm_model.py b/src/lighteval/models/litellm_model.py index 8d0a5e149..5c4235601 100644 --- a/src/lighteval/models/litellm_model.py +++ b/src/lighteval/models/litellm_model.py @@ -23,19 +23,21 @@ import logging import time from concurrent.futures import ThreadPoolExecutor +from dataclasses import dataclass from typing import Optional +import yaml from tqdm import tqdm from lighteval.data import GenerativeTaskDataset from lighteval.models.abstract_model import LightevalModel from lighteval.models.endpoints.endpoint_model import ModelInfo +from lighteval.models.model_input import GenerationParameters from lighteval.models.model_output import ( GenerativeResponse, LoglikelihoodResponse, LoglikelihoodSingleTokenResponse, ) -from lighteval.models.utils import ModelConfig from lighteval.tasks.requests import ( GreedyUntilRequest, LoglikelihoodRequest, @@ -59,29 +61,53 @@ litellm.cache = Cache(type="disk") -class LiteLLMModelConfig(ModelConfig): - model_name: str - provider: str | None = None - base_url: str | None = None - api_key: str | None = None +@dataclass +class LiteLLMModelConfig: + model: str + provider: Optional[str] = None + base_url: Optional[str] = None + api_key: Optional[str] = None + generation_parameters: GenerationParameters = None + + def __post_init__(self): + if self.generation_parameters is None: + self.generation_parameters = GenerationParameters() + + @classmethod + def from_path(cls, path): + with open(path, "r") as f: + config = yaml.safe_load(f)["model"] + + model = config["base_params"]["model_name"] + provider = config["base_params"].get("provider", None) + base_url = config["base_params"].get("base_url", None) + api_key = config["base_params"].get("api_key", None) + generation_parameters = GenerationParameters.from_dict(config) + return cls( + model=model, + provider=provider, + base_url=base_url, + generation_parameters=generation_parameters, + api_key=api_key, + ) class LiteLLMClient(LightevalModel): _DEFAULT_MAX_LENGTH: int = 4096 - def __init__(self, config) -> None: + def __init__(self, config, env_config) -> None: """ IMPORTANT: Your API keys should be set in the environment variables. If a base_url is not set, it will default to the public API. """ self.model_info = ModelInfo( - model_name=config.model_name, + model_name=config.model, model_sha="", model_dtype=None, model_size="", ) - self.model = config.model_name - self.provider = config.provider or config.model_name.split("/")[0] + self.model = config.model + self.provider = config.provider or config.model.split("/")[0] self.base_url = config.base_url self.api_key = config.api_key self.generation_parameters = config.generation_parameters diff --git a/src/lighteval/models/model_input.py b/src/lighteval/models/model_input.py index 18ad7bdbf..7eb1c6bd6 100644 --- a/src/lighteval/models/model_input.py +++ b/src/lighteval/models/model_input.py @@ -20,31 +20,32 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
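The model_input.py hunk below swaps GenerationParameters from a pydantic BaseModel to a plain dataclass, so the per-backend dict helpers switch from self.model_dump() to dataclasses.asdict() and keep only the fields the user actually set. A minimal sketch of that filtering pattern with a trimmed-down parameter set; the _Params name is an illustrative stand-in, not part of the patch:

from dataclasses import asdict, dataclass
from typing import Optional


@dataclass
class _Params:
    # trimmed-down stand-in for GenerationParameters
    temperature: Optional[float] = None
    top_p: Optional[float] = None
    max_new_tokens: Optional[int] = None

    def to_dict(self) -> dict:
        # same idea as to_vllm_openai_dict below: drop every field left at None
        return {k: v for k, v in asdict(self).items() if v is not None}


print(_Params(temperature=0.7).to_dict())  # -> {'temperature': 0.7}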
+from dataclasses import asdict, dataclass +from typing import Optional -from pydantic import BaseModel, NonNegativeFloat, NonNegativeInt +@dataclass +class GenerationParameters: + early_stopping: Optional[bool] = None # vllm, transformers + repetition_penalty: Optional[float] = None # vllm, transformers, tgi, sglang + frequency_penalty: Optional[float] = None # vllm, tgi, sglang + length_penalty: Optional[float] = None # vllm, transformers + presence_penalty: Optional[float] = None # vllm, sglang -class GenerationParameters(BaseModel, extra="forbid"): - early_stopping: bool | None = None # transformers - repetition_penalty: NonNegativeFloat | None = None # vllm, transformers, tgi, sglang - frequency_penalty: NonNegativeFloat | None = None # vllm, tgi, sglang - length_penalty: NonNegativeFloat | None = None # vllm, transformers - presence_penalty: NonNegativeFloat | None = None # vllm, sglang + max_new_tokens: Optional[int] = None # vllm, transformers, tgi, litellm, sglang + min_new_tokens: Optional[int] = None # vllm, transformers, sglang - max_new_tokens: NonNegativeInt | None = None # vllm, transformers, tgi, litellm, sglang - min_new_tokens: NonNegativeInt | None = None # vllm, transformers, sglang - - seed: NonNegativeInt | None = None # vllm, tgi, litellm - stop_tokens: list[str] | None = None # vllm, transformers, tgi, litellm, sglang - temperature: NonNegativeFloat | None = None # vllm, transformers, tgi, litellm, sglang - top_k: NonNegativeInt | None = None # vllm, transformers, tgi, sglang - min_p: NonNegativeFloat | None = None # vllm, transformers, sglang - top_p: NonNegativeFloat | None = None # vllm, transformers, tgi, litellm, sglang - truncate_prompt: bool | None = None # vllm, tgi + seed: Optional[int] = None # vllm, tgi, litellm + stop_tokens: Optional[list[str]] = None # vllm, transformers, tgi, litellm, sglang + temperature: Optional[float] = None # vllm, transformers, tgi, litellm, sglang + top_k: Optional[int] = None # vllm, transformers, tgi, sglang + min_p: Optional[float] = None # vllm, transformers, sglang + top_p: Optional[int] = None # vllm, transformers, tgi, litellm, sglang + truncate_prompt: Optional[bool] = None # vllm, tgi # response format to be followed by the model, # more info here https://platform.openai.com/docs/api-reference/chat/create#chat-create-response_format - response_format: str | None = None # inference_providers + response_format: Optional[str] = None # inference_providers @classmethod def from_dict(cls, config_dict: dict): @@ -72,7 +73,7 @@ def from_model_args(cls, model_args: str): Args: model_args (str): A string like the following: - "pretrained=deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B,dtype=float16,max_model_length=32768,generation_parameters={temperature:0.7,top_p:5}" + "pretrained=deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B,dtype=float16,max_model_length=32768,generation={temperature:0.7,top_p:5}" """ def parse_model_args(model_args): @@ -144,7 +145,7 @@ def to_vllm_dict(self) -> dict: # Task specific sampling params to set in model: n, best_of, use_beam_search # Generation specific params to set in model: logprobs, prompt_logprobs - x = {sampling_params_to_vllm_naming.get(k, k): v for k, v in self.model_dump().items() if v is not None} + x = {sampling_params_to_vllm_naming.get(k, k): v for k, v in asdict(self).items() if v is not None} # VLLM max_tokens is 16 by default, however the pipeline expect the max_tokens to be None, if the user didn't specify it if not x.get("max_tokens"): x["max_tokens"] = None @@ -159,7 +160,7 @@ def 
to_vllm_openai_dict(self) -> dict: """ # Task specific sampling params to set in model: n, best_of, use_beam_search # Generation specific params to set in model: logprobs, prompt_logprobs - return {k: v for k, v in self.model_dump().items() if v is not None} + return {k: v for k, v in asdict(self).items() if v is not None} def to_transformers_dict(self) -> dict: """Selects relevant generation and sampling parameters for transformers models. diff --git a/src/lighteval/models/model_loader.py b/src/lighteval/models/model_loader.py index 4da1af0b6..4c8520d43 100644 --- a/src/lighteval/models/model_loader.py +++ b/src/lighteval/models/model_loader.py @@ -52,6 +52,7 @@ is_tgi_available, is_vllm_available, ) +from lighteval.utils.utils import EnvConfig logger = logging.getLogger(__name__) @@ -71,6 +72,7 @@ def load_model( # noqa: C901 SGLangModelConfig, InferenceProvidersModelConfig, ], + env_config: EnvConfig, ) -> Union[TransformersModel, AdapterModel, DeltaModel, ModelClient, DummyModel]: """Will load either a model from an inference server or a model from a checkpoint, depending on the config type. @@ -92,25 +94,25 @@ def load_model( # noqa: C901 return load_model_with_tgi(config) if isinstance(config, InferenceEndpointModelConfig) or isinstance(config, ServerlessEndpointModelConfig): - return load_model_with_inference_endpoints(config) + return load_model_with_inference_endpoints(config, env_config=env_config) if isinstance(config, TransformersModelConfig): - return load_model_with_accelerate_or_default(config) + return load_model_with_accelerate_or_default(config=config, env_config=env_config) if isinstance(config, DummyModelConfig): - return load_dummy_model(config) + return load_dummy_model(config=config, env_config=env_config) if isinstance(config, VLLMModelConfig): - return load_model_with_accelerate_or_default(config) + return load_model_with_accelerate_or_default(config=config, env_config=env_config) if isinstance(config, SGLangModelConfig): - return load_sglang_model(config) + return load_sglang_model(config=config, env_config=env_config) if isinstance(config, OpenAIModelConfig): - return load_openai_model(config) + return load_openai_model(config=config, env_config=env_config) if isinstance(config, LiteLLMModelConfig): - return load_litellm_model(config) + return load_litellm_model(config=config, env_config=env_config) if isinstance(config, InferenceProvidersModelConfig): return load_inference_providers_model(config=config) @@ -127,57 +129,59 @@ def load_model_with_tgi(config: TGIModelConfig): return model -def load_litellm_model(config: LiteLLMModelConfig): +def load_litellm_model(config: LiteLLMModelConfig, env_config: EnvConfig): if not is_litellm_available(): raise ImportError(NO_LITELLM_ERROR_MSG) - model = LiteLLMClient(config) + model = LiteLLMClient(config, env_config) return model -def load_openai_model(config: OpenAIModelConfig): +def load_openai_model(config: OpenAIModelConfig, env_config: EnvConfig): if not is_openai_available(): raise ImportError() - model = OpenAIClient(config) + model = OpenAIClient(config, env_config) return model -def load_model_with_inference_endpoints(config: Union[InferenceEndpointModelConfig, ServerlessEndpointModelConfig]): +def load_model_with_inference_endpoints( + config: Union[InferenceEndpointModelConfig, ServerlessEndpointModelConfig], env_config: EnvConfig +): logger.info("Spin up model using inference endpoint.") - model = InferenceEndpointModel(config=config) + model = InferenceEndpointModel(config=config, env_config=env_config) return 
model def load_model_with_accelerate_or_default( - config: Union[AdapterModelConfig, TransformersModelConfig, DeltaModelConfig], + config: Union[AdapterModelConfig, TransformersModelConfig, DeltaModelConfig], env_config: EnvConfig ): if isinstance(config, AdapterModelConfig): - model = AdapterModel(config=config) + model = AdapterModel(config=config, env_config=env_config) elif isinstance(config, DeltaModelConfig): - model = DeltaModel(config=config) + model = DeltaModel(config=config, env_config=env_config) elif isinstance(config, VLLMModelConfig): if not is_vllm_available(): raise ImportError(NO_VLLM_ERROR_MSG) - model = VLLMModel(config=config) + model = VLLMModel(config=config, env_config=env_config) return model else: - model = TransformersModel(config=config) + model = TransformersModel(config=config, env_config=env_config) return model -def load_dummy_model(config: DummyModelConfig): - return DummyModel(config=config) +def load_dummy_model(config: DummyModelConfig, env_config: EnvConfig): + return DummyModel(config=config, env_config=env_config) def load_inference_providers_model(config: InferenceProvidersModelConfig): return InferenceProvidersClient(config=config) -def load_sglang_model(config: SGLangModelConfig): +def load_sglang_model(config: SGLangModelConfig, env_config: EnvConfig): if not is_sglang_available(): raise ImportError(NO_SGLANG_ERROR_MSG) - return SGLangModel(config=config) + return SGLangModel(config=config, env_config=env_config) diff --git a/src/lighteval/models/sglang/sglang_model.py b/src/lighteval/models/sglang/sglang_model.py index 6fd6a1dab..2f758be6b 100644 --- a/src/lighteval/models/sglang/sglang_model.py +++ b/src/lighteval/models/sglang/sglang_model.py @@ -22,25 +22,26 @@ import gc import logging +from dataclasses import dataclass from typing import Optional import torch -from pydantic import PositiveFloat, PositiveInt from tqdm import tqdm from lighteval.data import GenerativeTaskDataset, LoglikelihoodDataset from lighteval.models.abstract_model import LightevalModel, ModelInfo +from lighteval.models.model_input import GenerationParameters from lighteval.models.model_output import ( GenerativeResponse, LoglikelihoodResponse, ) -from lighteval.models.utils import ModelConfig, _simplify_name +from lighteval.models.utils import _get_dtype, _simplify_name from lighteval.tasks.requests import ( GreedyUntilRequest, LoglikelihoodRequest, ) from lighteval.utils.imports import is_sglang_available -from lighteval.utils.utils import as_list +from lighteval.utils.utils import EnvConfig, as_list logger = logging.getLogger(__name__) @@ -56,14 +57,15 @@ get_tokenizer = None -class SGLangModelConfig(ModelConfig): - model_name: str +@dataclass +class SGLangModelConfig: + pretrained: str load_format: str = "auto" dtype: str = "auto" - tp_size: PositiveInt = 1 # how many GPUs to use for tensor parallelism - dp_size: PositiveInt = 1 # how many GPUs to use for data parallelism - context_length: PositiveInt | None = None - random_seed: PositiveInt | None = 1234 + tp_size: int = 1 # how many GPUs to use for tensor parallelism + dp_size: int = 1 # how many GPUs to use for data parallelism + context_length: int | None = None + random_seed: Optional[int] = 1234 trust_remote_code: bool = False use_chat_template: bool = False device: str = "cuda" @@ -72,28 +74,34 @@ class SGLangModelConfig(ModelConfig): add_special_tokens: bool = True pairwise_tokenization: bool = False sampling_backend: str | None = None - attention_backend: str | None = None - mem_fraction_static: PositiveFloat 
= 0.8 - chunked_prefill_size: PositiveInt = 4096 + attention_backend: str = None + mem_fraction_static: float = 0.8 + chunked_prefill_size: int = 4096 + generation_parameters: GenerationParameters = None + + def __post_init__(self): + if not self.generation_parameters: + self.generation_parameters = GenerationParameters() class SGLangModel(LightevalModel): def __init__( self, config: SGLangModelConfig, + env_config: EnvConfig, ): """Initializes a HuggingFace `AutoModel` and `AutoTokenizer` for evaluation.""" self._config = config self.use_chat_template = config.use_chat_template - self.data_parallel_size = config.dp_size - self.tensor_parallel_size = config.tp_size - self._add_special_tokens = config.add_special_tokens - self._tokenizer = self._create_auto_tokenizer(config) - self._max_length = config.context_length if config.context_length is not None else None - self.model = self._create_auto_model(config) - self.model_name = _simplify_name(config.model_name) + self.data_parallel_size = int(config.dp_size) + self.tensor_parallel_size = int(config.tp_size) + self._add_special_tokens = bool(config.add_special_tokens) + self._tokenizer = self._create_auto_tokenizer(config, env_config) + self._max_length = int(config.context_length) if config.context_length is not None else None + self.model = self._create_auto_model(config, env_config) + self.model_name = _simplify_name(config.pretrained) self.model_sha = "" # config.get_model_sha() - self.precision = config.dtype + self.precision = _get_dtype(config.dtype, config=self._config) self.sampling_params = config.generation_parameters.to_sglang_dict() self.model_info = ModelInfo(model_name=self.model_name, model_sha=self.model_sha) self.sampling_backend = config.sampling_backend @@ -120,22 +128,22 @@ def add_special_tokens(self): def max_length(self) -> int: return self._max_length - def _create_auto_model(self, config: SGLangModelConfig) -> Optional[Engine]: + def _create_auto_model(self, config: SGLangModelConfig, env_config: EnvConfig) -> Optional[Engine]: self.model_args = { - "model_path": config.model_name, + "model_path": config.pretrained, "trust_remote_code": config.trust_remote_code, "dtype": config.dtype, "device": "cuda", "random_seed": config.random_seed, "load_format": config.load_format, "context_length": self._max_length, - "dp_size": config.dp_size, - "tp_size": config.tp_size, + "dp_size": int(config.dp_size), + "tp_size": int(config.tp_size), "sampling_backend": config.sampling_backend, "attention_backend": config.attention_backend, - "mem_fraction_static": config.mem_fraction_static, + "mem_fraction_static": float(config.mem_fraction_static), "schedule_policy": "fcfs", - "chunked_prefill_size": config.chunked_prefill_size, + "chunked_prefill_size": int(config.chunked_prefill_size), "disable_radix_cache": True, } model = Engine(**self.model_args) @@ -145,9 +153,9 @@ def _create_auto_model(self, config: SGLangModelConfig) -> Optional[Engine]: return model - def _create_auto_tokenizer(self, config: SGLangModelConfig): + def _create_auto_tokenizer(self, config: SGLangModelConfig, env_config: EnvConfig): tokenizer = get_tokenizer( - config.model_name, + config.pretrained, tokenizer_mode="auto", trust_remote_code=config.trust_remote_code, tokenizer_revision="main", diff --git a/src/lighteval/models/transformers/adapter_model.py b/src/lighteval/models/transformers/adapter_model.py index fd341542c..4ce3c7f20 100644 --- a/src/lighteval/models/transformers/adapter_model.py +++ b/src/lighteval/models/transformers/adapter_model.py @@ 
-22,14 +22,15 @@ import logging from contextlib import nullcontext +from dataclasses import dataclass import torch -import transformers -from transformers import AutoModelForCausalLM +from transformers import AutoModelForCausalLM, PreTrainedTokenizer from lighteval.models.transformers.transformers_model import TransformersModel, TransformersModelConfig from lighteval.models.utils import _get_dtype from lighteval.utils.imports import NO_PEFT_ERROR_MSG, is_peft_available +from lighteval.utils.utils import EnvConfig logger = logging.getLogger(__name__) @@ -38,41 +39,50 @@ from peft import PeftModel +@dataclass class AdapterModelConfig(TransformersModelConfig): # Adapter models have the specificity that they look at the base model (= the parent) for the tokenizer and config - base_model: str - adapter_weights: bool + base_model: str = None - def model_post_init(self, __context): + def __post_init__(self): if not is_peft_available(): raise ImportError(NO_PEFT_ERROR_MSG) + if not self.base_model: # must have a default value bc of dataclass inheritance, but can't actually be None + raise ValueError("The base_model argument must not be null for an adapter model config") -class AdapterModel(TransformersModel): - def _create_auto_model(self) -> transformers.PreTrainedModel: - """Returns a PeftModel from a base model and a version fined tuned using PEFT.""" - torch_dtype = _get_dtype(self.config.dtype) - model_parallel, max_memory, device_map = self.init_model_parallel(self.config.model_parallel) - self.config.model_parallel = model_parallel + return super().__post_init__() - adapter_weights = self.config.pretrained - merged_path = f"{adapter_weights}-adapter-applied" + def init_configs(self, env_config: EnvConfig): + return self._init_configs(self.base_model, env_config) - if self.config.dtype == "4bit": - from transformers import BitsAndBytesConfig - quantization_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.float16) - elif self.config.dtype == "8bit": - from transformers import BitsAndBytesConfig +class AdapterModel(TransformersModel): + def _create_auto_tokenizer(self, config: AdapterModelConfig, env_config: EnvConfig) -> PreTrainedTokenizer: + # By default, we look at the model config for the model stored in `base_model` + # (= the parent model, not the model of interest) + return self._create_auto_tokenizer_with_name( + model_name=config.base_model, + revision=config.revision, + env_config=env_config, + tokenizer_name=config.tokenizer, + subfolder=config.subfolder, + trust_remote_code=config.trust_remote_code, + ) - quantization_config = BitsAndBytesConfig(load_in_8bit=True) - else: - quantization_config = None + def _create_auto_model(self, config: AdapterModelConfig, env_config: EnvConfig) -> AutoModelForCausalLM: + """Returns a PeftModel from a base model and a version fined tuned using PEFT.""" + torch_dtype = _get_dtype(config.dtype, self._config) + config.model_parallel, max_memory, device_map = self.init_model_parallel(config.model_parallel) + + adapter_weights = config.pretrained + + merged_path = f"{adapter_weights}-adapter-applied" if self.accelerator.is_local_main_process if self.accelerator is not None else nullcontext(): - logger.info(f"Loading model from {adapter_weights} and applying adapter to {self.config.base_model}") + logger.info(f"Loading model from {adapter_weights} and applying adapter to {config.base_model}") base = AutoModelForCausalLM.from_pretrained( - self.config.base_model, torch_dtype=torch.float16, low_cpu_mem_usage=True + 
config.base_model, torch_dtype=torch.float16, low_cpu_mem_usage=True, token=env_config.token ) # resize model for adapters with added tokens token_diff = len(self._tokenizer) - base.config.vocab_size @@ -100,8 +110,10 @@ def _create_auto_model(self) -> transformers.PreTrainedModel: max_memory=max_memory, device_map=device_map, torch_dtype=torch_dtype, - trust_remote_code=self.config.trust_remote_code, - quantization_config=quantization_config, + trust_remote_code=config.trust_remote_code, + cache_dir=env_config.cache_dir, + quantization_config=config.quantization_config, + token=env_config.token, ) return model diff --git a/src/lighteval/models/transformers/delta_model.py b/src/lighteval/models/transformers/delta_model.py index 51395b424..40a91992a 100644 --- a/src/lighteval/models/transformers/delta_model.py +++ b/src/lighteval/models/transformers/delta_model.py @@ -22,6 +22,7 @@ import logging from contextlib import nullcontext +from dataclasses import dataclass import torch from tqdm import tqdm @@ -29,24 +30,34 @@ from lighteval.models.transformers.transformers_model import TransformersModel, TransformersModelConfig from lighteval.models.utils import _get_dtype, _get_model_sha +from lighteval.utils.utils import EnvConfig logger = logging.getLogger(__name__) +@dataclass class DeltaModelConfig(TransformersModelConfig): # Delta models look at the pretrained (= the delta weights) for the tokenizer and model config - base_model: str - delta_weights: bool + base_model: str = None + + def __post_init__(self): + self.revision = "main" + + if not self.base_model: # must have a default value bc of dataclass inheritance, but can't actually be None + raise ValueError("The base_model argument must not be null for a delta model config") + + return super().__post_init__() def get_model_sha(self): - return _get_model_sha(repo_id=self.model_name, revision="main") + return _get_model_sha(repo_id=self.pretrained, revision="main") class DeltaModel(TransformersModel): def _create_auto_model( self, config: DeltaModelConfig, + env_config: EnvConfig, ) -> AutoModelForCausalLM: """Returns a model created by adding the weights of a delta model to a base model.""" config.model_parallel, max_memory, device_map = self.init_model_parallel(config.model_parallel) @@ -59,13 +70,14 @@ def _create_auto_model( if self.accelerator.is_main_process if self.accelerator is not None else nullcontext(): logger.info(f"Loading base and delta models from {config.base_model} and {delta_model}") base = AutoModelForCausalLM.from_pretrained( - config.base_model, torch_dtype=torch.float16, low_cpu_mem_usage=True + config.base_model, torch_dtype=torch.float16, low_cpu_mem_usage=True, token=env_config.token ) delta = AutoModelForCausalLM.from_pretrained( delta_model, revision=config.revision + (f"/{config.subfolder}" if config.subfolder is not None else ""), torch_dtype=torch.float16, low_cpu_mem_usage=True, + token=env_config.token, ) for name, param in tqdm(base.state_dict().items(), desc="Applying delta"): @@ -83,7 +95,9 @@ def _create_auto_model( device_map=device_map, torch_dtype=torch_dtype, trust_remote_code=config.trust_remote_code, + cache_dir=env_config.cache_dir, quantization_config=config.quantization_config, + token=env_config.token, ) return model diff --git a/src/lighteval/models/transformers/transformers_model.py b/src/lighteval/models/transformers/transformers_model.py index dae0d3f09..e35aadcba 100644 --- a/src/lighteval/models/transformers/transformers_model.py +++ 
b/src/lighteval/models/transformers/transformers_model.py @@ -22,12 +22,13 @@ import logging import os +import warnings +from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn.functional as F import transformers -from pydantic import PositiveInt from torch.nn.utils.rnn import pad_sequence from torch.utils.data import DataLoader from tqdm import tqdm @@ -36,13 +37,15 @@ AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, + GPTQConfig, PretrainedConfig, ) -from transformers.generation.utils import GenerateOutput +from transformers.generation.utils import GenerateOutput, GenerationConfig from transformers.models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES from lighteval.data import GenerativeTaskDataset, LoglikelihoodDataset, LoglikelihoodSingleTokenDataset from lighteval.models.abstract_model import LightevalModel, ModelInfo +from lighteval.models.model_input import GenerationParameters from lighteval.models.model_output import ( Batch, GenerativeMultiturnResponse, @@ -50,7 +53,7 @@ LoglikelihoodResponse, LoglikelihoodSingleTokenResponse, ) -from lighteval.models.utils import ModelConfig, _get_dtype, _get_model_sha, _simplify_name +from lighteval.models.utils import _get_dtype, _get_model_sha, _simplify_name, batched from lighteval.tasks.requests import ( GreedyUntilMultiTurnRequest, GreedyUntilRequest, @@ -60,19 +63,21 @@ Request, ) from lighteval.utils.imports import ( + NO_AUTOGPTQ_ERROR_MSG, + NO_BNB_ERROR_MSG, is_accelerate_available, + is_autogptq_available, + is_bnb_available, ) from lighteval.utils.parallelism import find_executable_batch_size -from lighteval.utils.utils import as_list +from lighteval.utils.utils import EnvConfig, as_list, boolstring_to_bool logger = logging.getLogger(__name__) if is_accelerate_available(): - from datetime import timedelta - - from accelerate import Accelerator, InitProcessGroupKwargs + from accelerate import Accelerator from accelerate.utils import calculate_maximum_sizes, convert_bytes, get_max_memory os.environ["TOKENIZERS_PARALLELISM"] = "false" @@ -80,12 +85,13 @@ STARTING_BATCH_SIZE = 512 -class TransformersModelConfig(ModelConfig): +@dataclass +class TransformersModelConfig: """ Base configuration class for models. Attributes: - model_name (str): + pretrained (str): HuggingFace Hub model ID name or the path to a pre-trained model to load. This is effectively the `pretrained_model_name_or_path` argument of `from_pretrained` in the HuggingFace `transformers` API. @@ -106,7 +112,7 @@ class TransformersModelConfig(ModelConfig): add_special_tokens (bool, optional, defaults to True): Whether to add special tokens to the input sequences. If `None`, the default value will be set to `True` for seq2seq models (e.g. T5) and `False` for causal models. - model_parallel (bool, optional, defaults to None): + model_parallel (bool, optional, defaults to False): True/False: force to use or not the `accelerate` library to load a large model across multiple devices. 
Default: None which corresponds to comparing the number of processes with @@ -132,74 +138,140 @@ class TransformersModelConfig(ModelConfig): """ - model_name: str - tokenizer: str | None = None - subfolder: str | None = None + pretrained: str + accelerator: "Accelerator" = None + tokenizer: Optional[str] = None + multichoice_continuations_start_space: Optional[bool] = None + pairwise_tokenization: bool = False + subfolder: Optional[str] = None revision: str = "main" - batch_size: PositiveInt | None = None - generation_size: PositiveInt = 256 - max_length: PositiveInt | None = None + batch_size: int = -1 + max_gen_toks: Optional[int] = 256 + max_length: Optional[int] = None add_special_tokens: bool = True - model_parallel: bool | None = None - dtype: str | None = None + model_parallel: Optional[bool] = None + dtype: Optional[Union[str, torch.dtype]] = None device: Union[int, str] = "cuda" + quantization_config: Optional[BitsAndBytesConfig] = None trust_remote_code: bool = False use_chat_template: bool = False compile: bool = False - multichoice_continuations_start_space: bool | None = None - pairwise_tokenization: bool = False + generation_parameters: GenerationParameters = None + generation_config: GenerationConfig = None - def model_post_init(self, __context): - if self.multichoice_continuations_start_space is True: - logger.warning( - "You set `multichoice_continuations_start_space` to true. This will force multichoice continuations to use a starting space" - ) - if self.multichoice_continuations_start_space is False: - logger.warning( - "You set `multichoice_continuations_start_space` to false. This will remove a leading space from multichoice continuations, if present." + def __post_init__(self): + # Making sure this parameter is a boolean + self.multichoice_continuations_start_space = boolstring_to_bool(self.multichoice_continuations_start_space) + + if self.multichoice_continuations_start_space is not None: + if self.multichoice_continuations_start_space: + logger.info( + "You set `multichoice_continuations_start_space` to true. This will force multichoice continuations to use a starting space" + ) + else: + logger.info( + "You set `multichoice_continuations_start_space` to false. This will remove a leading space from multichoice continuations, if present." + ) + + self.model_parallel = boolstring_to_bool(self.model_parallel) + self.compile = boolstring_to_bool(self.compile) + + if self.quantization_config is not None and not is_bnb_available(): + raise ImportError(NO_BNB_ERROR_MSG) + + if not isinstance(self.pretrained, str): + raise ValueError("Pretrained model name must be passed as string.") + if not isinstance(self.device, str): + raise ValueError("Current device must be passed as string.") + + if self.generation_config and self.generation_parameters: + raise ValueError( + "Can't use both generation_config and generation_parameters argument. 
Pass the generation parameters to your generation config object" ) - def get_transformers_config(self) -> PretrainedConfig: - revision = self.revision + if not self.generation_parameters and not self.generation_config: + self.generation_parameters = GenerationParameters() + def _init_configs(self, model_name: str, env_config: EnvConfig) -> PretrainedConfig: + revision = self.revision if self.subfolder: revision = f"{self.revision}/{self.subfolder}" - auto_config = AutoConfig.from_pretrained( - self.model_name, + model_name, revision=revision, trust_remote_code=self.trust_remote_code, + cache_dir=env_config.cache_dir, + token=env_config.token, ) + # Gathering the model's automatic quantization config, if available + try: + model_auto_quantization_config = auto_config.quantization_config + logger.info("An automatic quantization config was found in the model's config. Using it to load the model") + except (AttributeError, KeyError): + model_auto_quantization_config = None + + if model_auto_quantization_config is not None: + if self.quantization_config is not None: + # We don't load models quantized by default with a different user provided conf + raise ValueError("You manually requested quantization on a model already quantized!") + + # We add the quantization to the model params we store + if model_auto_quantization_config["quant_method"] == "gptq": + if not is_autogptq_available(): + raise ImportError(NO_AUTOGPTQ_ERROR_MSG) + auto_config.quantization_config["use_exllama"] = None + self.quantization_config = GPTQConfig(**auto_config.quantization_config, disable_exllama=True) + elif model_auto_quantization_config["quant_method"] == "bitsandbytes": + if not is_bnb_available(): + raise ImportError(NO_BNB_ERROR_MSG) + self.quantization_config = BitsAndBytesConfig(**auto_config.quantization_config) + return auto_config + def init_configs(self, env_config: EnvConfig) -> PretrainedConfig: + return self._init_configs(self.pretrained, env_config=env_config) + def get_model_sha(self): - return _get_model_sha(repo_id=self.model_name, revision=self.revision) + return _get_model_sha(repo_id=self.pretrained, revision=self.revision) + + +@dataclass +class BaseModelConfig(TransformersModelConfig): + def __post_init__(self): + super().__post_init__() + + warnings.warn( + "BaseModelConfig is deprecated and will be removed. 
Use TransformersModelConfig instead", + FutureWarning, + ) class TransformersModel(LightevalModel): def __init__( self, + env_config: EnvConfig, config: TransformersModelConfig, ): """Initializes a HuggingFace `AutoModel` and `AutoTokenizer` for evaluation.""" - self.config = config - self.accelerator = Accelerator(kwargs_handlers=[InitProcessGroupKwargs(timeout=timedelta(seconds=3000))]) - self._device = self.accelerator.device + self._config = config.init_configs(env_config) + self.accelerator = config.accelerator + self._max_length = self._init_max_length(config.max_length) self.use_chat_template = config.use_chat_template - self.multichoice_continuations_start_space = config.multichoice_continuations_start_space - self._add_special_tokens = config.add_special_tokens or False - self.pairwise_tokenization = config.pairwise_tokenization - self.batch_size = config.batch_size - self.transformers_config = config.get_transformers_config() - self.model_sha = config.get_model_sha() - self._max_length = self._init_max_length() - self._tokenizer = self._create_auto_tokenizer() - self.model = self._create_auto_model() + self._add_special_tokens = config.add_special_tokens if config.add_special_tokens is not None else False + self._tokenizer = self._create_auto_tokenizer(config, env_config) + + # If model_parallel is not set we compare the number of processes with the number of GPUs + self.model = self._create_auto_model(config, env_config) + self.model.eval() + torch.set_grad_enabled(False) + + self._device = config.accelerator.device if config.accelerator is not None else "cpu" + self.multichoice_continuations_start_space = config.multichoice_continuations_start_space # We are in DP (and launch the script with `accelerate launch`) - if config.model_parallel is False and self.config.dtype not in ["4bit", "8bit"]: + if not config.model_parallel and not isinstance(config.quantization_config, BitsAndBytesConfig): logger.info(f"Using Data Parallelism, putting model on device {self._device}") self.model = self.model.to(self._device) if config.compile: @@ -209,27 +281,35 @@ def __init__( except AttributeError as e: logger.warning("Could not compile the model because: ", e) - self.model_name = _simplify_name(config.model_name) + self.model_name = _simplify_name(config.pretrained) + self.model_sha = config.get_model_sha() - self.generation_config_dict = config.generation_parameters.to_transformers_dict() + self.precision = _get_dtype(config.dtype, config=self._config) + if config.generation_config is None: + self.generation_parameters = config.generation_parameters + self.generation_config_dict = self.generation_parameters.to_transformers_dict() + else: + self.generation_config_dict = config.generation_config.to_dict() if is_accelerate_available(): model_size, _ = calculate_maximum_sizes(self.model) model_size = convert_bytes(model_size) else: model_size = -1 - self.model_info = ModelInfo( - model_name=self.config.model_name, + model_name=self.model_name, model_sha=self.model_sha, - model_dtype=config.dtype, - model_size=str(model_size), + model_dtype=self.precision, + model_size=model_size, ) + self.pairwise_tokenization = config.pairwise_tokenization + @classmethod def from_model( cls, model: Union[AutoModelForCausalLM, LightevalModel], + env_config: EnvConfig, accelerator: "Accelerator" = None, tokenizer_name: str = None, # custom tokenizer trust_remote_code: bool = False, @@ -252,6 +332,7 @@ def from_model( self._tokenizer = self._create_auto_tokenizer_with_name( model_name=model.name_or_path, 
revision=model.config._commit_hash, + env_config=env_config, trust_remote_code=trust_remote_code, tokenizer_name=tokenizer_name, ) @@ -302,17 +383,6 @@ def add_special_tokens(self): def max_length(self) -> int: return self._max_length - @property - def device(self) -> Union[int, str, torch.device]: - return self._device - - @property - def disable_tqdm(self) -> bool: - disable_tqdm = False - if self.accelerator: - disable_tqdm = bool(not self.accelerator.is_main_process) - return disable_tqdm - def init_model_parallel(self, model_parallel: bool | None = None) -> Tuple[bool, Optional[dict], Optional[str]]: """Compute all the parameters related to model_parallel""" if not is_accelerate_available(): @@ -320,8 +390,7 @@ def init_model_parallel(self, model_parallel: bool | None = None) -> Tuple[bool, self.num_local_processes = int(os.environ.get("LOCAL_WORLD_SIZE", 1)) self.num_machines = torch.cuda.device_count() // self.num_local_processes - - if self.num_machines == 1: + if self.num_machines == 0: logger.info("We are not in a distributed setting. Setting model_parallel to False.") model_parallel = False @@ -356,82 +425,134 @@ def init_model_parallel(self, model_parallel: bool | None = None) -> Tuple[bool, ) return model_parallel, max_mem_this_process, device_map - def _create_auto_model(self) -> transformers.PreTrainedModel: + def _create_auto_model( + self, config: TransformersModelConfig, env_config: EnvConfig + ) -> transformers.PreTrainedModel: """ Creates an instance of the pretrained HF model. + Args: + pretrained (str): The name or path of the pretrained model. + revision (str): The revision of the model. + subfolder (Optional[str], optional): The subfolder within the model. Defaults to None. + max_memory (Optional[dict], optional): The maximum memory to allocate for the model per GPU. Defaults to None. + device_map (Optional[dict], optional): The device mapping for the model. Defaults to None. + torch_dtype (Optional[Union[str, torch.dtype]], optional): The torch data type for the model. Defaults to None. + quantization_config (Optional[Union[BitsAndBytesConfig, GPTQConfig]], optional): The quantization configuration for the model. Defaults to None. + trust_remote_code (bool, optional): Whether to trust remote code. Defaults to False. + cache_dir (str, optional): The cache directory for the model. Defaults to "/scratch". + Returns: transformers.PreTrainedModel: The created auto model instance. 
""" - model_parallel, max_memory, device_map = self.init_model_parallel(self.config.model_parallel) - self.config.model_parallel = model_parallel - - if self.config.dtype == "4bit": - quantization_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.float16) - elif self.config.dtype == "8bit": - quantization_config = BitsAndBytesConfig(load_in_8bit=True) - else: - quantization_config = None - - torch_dtype = _get_dtype(self.config.dtype) - subfolder = self.config.subfolder - revision = self.config.revision + (f"/{subfolder}" if subfolder is not None else "") - - pretrained_config = self.transformers_config + config.model_parallel, max_memory, device_map = self.init_model_parallel(config.model_parallel) + torch_dtype = _get_dtype(config.dtype, self._config) + + pretrained_config = AutoConfig.from_pretrained( + config.pretrained, + revision=(config.revision + (f"/{config.subfolder}" if config.subfolder else "")), + trust_remote_code=config.trust_remote_code, + cache_dir=env_config.cache_dir, + token=env_config.token, + ) kwargs = {} if "quantization_config" not in pretrained_config.to_dict(): - kwargs["quantization_config"] = quantization_config + kwargs["quantization_config"] = config.quantization_config model = AutoModelForCausalLM.from_pretrained( - self.config.model_name, - revision=revision, + config.pretrained, + revision=config.revision + (f"/{config.subfolder}" if config.subfolder is not None else ""), max_memory=max_memory, device_map=device_map, torch_dtype=torch_dtype, - trust_remote_code=self.config.trust_remote_code, + trust_remote_code=config.trust_remote_code, + cache_dir=env_config.cache_dir, + offload_folder=env_config.cache_dir, + token=env_config.token, **kwargs, ) - # model.to(self.device) - model.eval() - torch.set_grad_enabled(False) - - if self.config.compile: - try: - logger.info("Compiling the model") - model.compile() - except AttributeError as e: - logger.warning("Could not compile the model because: ", e) return model def _create_auto_tokenizer( + self, config: TransformersModelConfig, env_config: EnvConfig + ) -> transformers.PreTrainedTokenizer: + return self._create_auto_tokenizer_with_name( + model_name=config.pretrained, + revision=config.revision, + env_config=env_config, + tokenizer_name=config.tokenizer, + subfolder=config.subfolder, + trust_remote_code=config.trust_remote_code, + ) + + def _create_auto_tokenizer_with_name( self, + model_name: str, + revision: str, + env_config: EnvConfig, + tokenizer_name: str = None, + subfolder: str = None, + trust_remote_code: bool = False, ) -> transformers.PreTrainedTokenizer: """ Create a Hugging Face AutoTokenizer for language model. + Args: + pretrained (str): The identifier of the pretrained model to load. + revision (str): The specific model version to load. + subfolder (str): The subfolder within the model repository. + tokenizer (str, optional): The identifier of the tokenizer to load. If not provided, the default tokenizer for the pretrained model will be used. + cache_dir (str, optional): The directory to cache the downloaded models and tokens. Defaults to "/scratch". + trust_remote_code (bool, optional): Whether to trust remote code execution during tokenization. Defaults to False. + Returns: transformers.PreTrainedTokenizer: The created tokenizer. 
- """ - tokenizer_name = self.config.tokenizer or self.config.model_name - subfolder = self.config.subfolder - revision = self.config.revision + (f"/{subfolder}" if subfolder is not None else "") - tokenizer = AutoTokenizer.from_pretrained( - tokenizer_name, - revision=revision, - trust_remote_code=self.config.trust_remote_code, - padding_side="left", - truncation_side="left", - ) + Raises: + RecursionError: If an error occurs during tokenization, a fallback tokenizer with "" token will be created. + """ + try: + tokenizer = AutoTokenizer.from_pretrained( + model_name if tokenizer_name is None else tokenizer_name, + revision=revision + (f"/{subfolder}" if subfolder is not None else ""), + cache_dir=env_config.cache_dir, + token=env_config.token, + trust_remote_code=trust_remote_code, + padding_side="left", + truncation_side="left", + ) + except RecursionError: + tokenizer = AutoTokenizer.from_pretrained( + model_name if tokenizer_name is None else tokenizer_name, + revision=revision + (f"/{subfolder}" if subfolder is not None else ""), + cache_dir=env_config.cache_dir, + token=env_config.token, + trust_remote_code=trust_remote_code, + unk_token="", + padding_side="left", + truncation_side="left", + ) + except FileNotFoundError: + logger.warning( + "Problem when loading the tokenizer in the cache - discarding the provided cache path value." + ) + tokenizer = AutoTokenizer.from_pretrained( + model_name if tokenizer_name is None else tokenizer_name, + revision=revision + (f"/{subfolder}" if subfolder is not None else ""), + token=env_config.token, + trust_remote_code=trust_remote_code, + padding_side="left", + truncation_side="left", + ) tokenizer.pad_token = tokenizer.eos_token tokenizer.model_max_length = self.max_length logger.info("Tokenizer truncation and padding size set to the left side.") return tokenizer - def _init_max_length(self) -> int: + def _init_max_length(self, max_length) -> int: """Return the maximum sequence length of the model. NOTE: Different model configurations have different max sequence length attribute names. @@ -441,24 +562,37 @@ def _init_max_length(self) -> int: NOTE: For relative position encoded models you should specify the max sequence length of the model in the constructor via `max_length`. + Args: + max_length (Optional[int]): The maximum length of the input sequence. If not provided, it will be determined + based on the model's configuration or tokenizer's model_max_length attribute. + Returns: int: Max length to use depending on the available args and config """ - if self.config.max_length is not None: - return self.config.max_length - + if max_length is not None: + return int(max_length) # Try to get the sequence length from the model config. seqlen_config_attrs = ("n_positions", "max_position_embeddings", "n_ctx") - for attr in seqlen_config_attrs: - if hasattr(self.transformers_config, attr): - return getattr(self.transformers_config, attr) - logger.warning( - "No max_length attribute found in the model config. Using the default max sequence length setting {2048}. It is recomended to set max_length trough the madel args" - ) + for attr in seqlen_config_attrs: + if hasattr(self._config, attr): + return getattr(self._config, attr) + # Default max sequence length setting for when no `max_length` is provided + # or no max length config setting is found in the model or tokenizer. 
return 2048 + @property + def device(self) -> Union[int, str, torch.device]: + return self._device + + @property + def disable_tqdm(self) -> bool: + disable_tqdm = False + if self.accelerator: + disable_tqdm = bool(not self.accelerator.is_main_process) + return disable_tqdm + def _check_continuations_start_space(self, continuation: str) -> str: """Some models tokenizer want a space at the beginning and other not. We update this if needed here. multichoice_continuations_start_space can be: @@ -477,8 +611,8 @@ def _check_continuations_start_space(self, continuation: str) -> str: def _model_call(self, inputs: torch.Tensor) -> torch.Tensor: return self.model(inputs).logits - def _get_batch_size(self, max_input_length: int, override_bs: int | None, starting_batch_size: int = 512) -> int: - if override_bs is not None: + def _get_batch_size(self, max_input_length: int, override_bs: int = 0, starting_batch_size: int = 512) -> int: + if override_bs > 0: return override_bs logger.info(f"Detecting largest batch size with max_input_length={max_input_length}") @@ -497,14 +631,162 @@ def forward_batch(batch_size): return batch_size def greedy_until_multi_turn( # noqa: C901 - self, - requests: list[GreedyUntilMultiTurnRequest], + self, requests: list[GreedyUntilMultiTurnRequest], override_bs: Optional[int] = None ) -> GenerativeMultiturnResponse: - raise NotImplementedError("This method is not implemented for this model") + for request in requests: + request.stop_sequence = as_list(request.stop_sequence) + [self.tokenizer.eos_token] + request.tokenized_context = self.tok_encode(request.context)["input_ids"] + + results = [] + + dataset = GenerativeTaskDataset(requests=requests, num_dataset_splits=1) + dataloader = DataLoader(dataset, batch_size=1, collate_fn=lambda batch: batch) + + if self.accelerator: + dataloader = self.accelerator.prepare(dataloader) + + logger.warning("Running greedy multi turn generation, the batch size is set to 1 for this task.") + + for request_batch in tqdm( + dataloader, desc="Greedy Multi Turn generation", position=1, leave=False, disable=self.disable_tqdm + ): + request = request_batch[0] + # For chat models, generation stops with EOS token, so we don't need to specify stop tokens + if self.use_chat_template: + stop_tokens = [] + else: + stop_tokens = request.stop_sequence + max_generated_tokens = request.generation_size + context = request.context[0] + max_context_size_allowed = self.max_length - max_generated_tokens + + model_inputs = self.tokenizer( + context, + padding=True, + truncation=True, + return_tensors="pt", + max_length=max_context_size_allowed, + add_special_tokens=self.add_special_tokens, + ).to(self.device) + + stopping_criteria = transformers.StoppingCriteriaList( + [ + *[ + MultiTokenEOSCriteria( + sequence, self.tokenizer, input_ids_shape=model_inputs["input_ids"].shape + ) + for sequence in stop_tokens + ], + ] + ) + + generation_config = self.generation_config_dict.copy() + generation_config.update( + { + "max_new_tokens": max_generated_tokens, + "pad_token_id": self.tokenizer.pad_token_id + if self.tokenizer.pad_token_id + else self.tokenizer.eos_token_id, + "eos_token_id": self.tokenizer.eos_token_id, + "do_sample": False, + } + ) + + model_outputs: GenerateOutput = self.model.generate( + **model_inputs, stopping_criteria=stopping_criteria, **generation_config + ) + model_outputs = model_outputs.sequences[0, model_inputs["input_ids"].size(1) :] + + # We manage stop tokens in an extra step in case they were incorrectly detected earlier + # (which can 
happen for multitoken stop sequences) + decoded_generation = self.tokenizer.decode(model_outputs) # should we skip_special_tokens=True here? + for term in stop_tokens: + decoded_generation = decoded_generation.split(term)[0] + model_generations = [model_outputs] + + input_tokens = [model_inputs["input_ids"]] + + for i, multi_turn_context in enumerate(request.context[1:]): + multi_turn_context = multi_turn_context.format(model_response=decoded_generation) + + model_inputs = self.tokenizer( + multi_turn_context, + padding=True, + truncation=True, + return_tensors="pt", + max_length=max_context_size_allowed, + add_special_tokens=self.add_special_tokens, + ).to(self.device) + + stopping_criteria = transformers.StoppingCriteriaList( + [ + *[ + MultiTokenEOSCriteria( + sequence, self.tokenizer, input_ids_shape=model_inputs["input_ids"].shape + ) + for sequence in stop_tokens + ], + ] + ) + + generation_config = self.generation_config_dict.copy() + generation_config.update( + { + "max_new_tokens": max_generated_tokens, + "pad_token_id": self.tokenizer.pad_token_id + if self.tokenizer.pad_token_id + else self.tokenizer.eos_token_id, + "eos_token_id": self.tokenizer.eos_token_id, + "do_sample": False, + } + ) + + model_outputs: GenerateOutput = self.model.generate( + input_ids=model_inputs["input_ids"], + attention_mask=model_inputs["attention_mask"], + stopping_criteria=stopping_criteria, + **generation_config, + ) + model_outputs = model_outputs.sequences[0, model_inputs["input_ids"].size(1) :] + model_generations.append(model_outputs) + input_tokens.append(model_inputs["input_ids"]) + + decoded_generation = self.tokenizer.decode(model_outputs, skip_special_tokens=True) + for term in stop_tokens: + decoded_generation = decoded_generation.split(term)[0] + + if self.accelerator: + padding_size = max(gen.shape[0] for gen in model_generations) + for i, gen in enumerate(model_generations): + model_generations[i] = F.pad( + gen, (0, padding_size - gen.shape[0]), value=self.tokenizer.pad_token_id + ) + model_generations = torch.stack(model_generations, dim=0) + model_generations, lengths = self.pad_and_gather(model_generations, drop_last_samples=False) + + model_answers = [] + for generation, _ in zip(model_generations, lengths): + generation = generation.cpu().tolist() + decoded = self.tokenizer.decode(generation, skip_special_tokens=True) + model_answers.append(decoded) + + for answers in batched(model_answers, len(request.context)): + results.append( + GenerativeMultiturnResponse( + result=answers, + input_tokens=input_tokens, + generated_tokens=[], + truncated_tokens_count=0, + padded_tokens_count=0, + ) + ) + + return results def greedy_until( self, requests: list[GreedyUntilRequest], + override_bs: Optional[int] = None, ) -> list[GenerativeResponse]: """ Generates responses using a greedy decoding strategy until certain ending conditions are met. 
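# Editor's note: a minimal, hypothetical sketch (not part of the patch) illustrating the
# batch-size override plumbing added in the hunks above and in the pipeline.py changes below:
# PipelineParameters gains an `override_batch_size` field, the pipeline forwards it to the
# request methods (e.g. `greedy_until(requests, override_bs=...)`), and `_get_batch_size`
# only auto-detects a batch size when no positive override is supplied. The standalone
# helper name below is invented for illustration; the `None` handling is an assumption,
# since the patched `_get_batch_size` simply checks `override_bs > 0`.
from typing import Optional


def resolve_batch_size(override_bs: Optional[int], starting_batch_size: int = 512) -> int:
    # A positive override wins outright; otherwise the real code probes for the largest
    # batch that fits in memory (find_executable_batch_size). Here we just fall back to
    # the starting value so the sketch stays self-contained and runnable.
    if override_bs is not None and override_bs > 0:
        return override_bs
    return starting_batch_size


# Example: PipelineParameters(launcher_type=..., override_batch_size=8) ends up calling
# greedy_until(requests, override_bs=8), so resolve_batch_size(8) == 8, while
# override_batch_size=None leaves batch-size detection to the model.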
@@ -543,7 +825,7 @@ def greedy_until( longest_context_continuation_size_in_split, self.max_length ) batch_size = self._get_batch_size( - override_bs=self.batch_size, + override_bs=override_bs, max_input_length=max_context_continuation_size_allowed, starting_batch_size=starting_batch_size, ) @@ -578,7 +860,7 @@ def greedy_until( tokenized = self.tokenizer( context, truncation="longest_first", # we truncate to the model max length if needed - padding="longest", # we pad to the longest sequence + padding="max_length", # we pad to the longest sequence return_tensors="pt", max_length=max_context_continuation_size_allowed, # we always allow minimum one token of generation add_special_tokens=self.add_special_tokens, @@ -1081,6 +1363,16 @@ def _loglikelihood_single_token( return dataset.get_original_order(res) +class BaseModel(TransformersModel): + def __post_init__(self): + super().__post_init__() + + warnings.warn( + "Careful, the BaseModel name is deprecated and will be removed, you should use TransformersModel instead!", + FutureWarning, + ) + + class MultiTokenEOSCriteria(transformers.StoppingCriteria): """Criteria to stop on the specified multi-token sequence.""" diff --git a/src/lighteval/models/utils.py b/src/lighteval/models/utils.py index 2398bfa02..ac540e2a6 100644 --- a/src/lighteval/models/utils.py +++ b/src/lighteval/models/utils.py @@ -20,87 +20,14 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. -import json import os -import re from itertools import islice from typing import Optional, Union import torch -import yaml from huggingface_hub import HfApi -from pydantic import BaseModel from transformers import AutoConfig -from lighteval.models.model_input import GenerationParameters - - -class ModelConfig(BaseModel, extra="forbid"): - generation_parameters: GenerationParameters = GenerationParameters() - - @classmethod - def from_path(cls, path: str): - with open(path, "r") as f: - config = yaml.safe_load(f) - - return cls(**config["model_parameters"]) - - @classmethod - def from_args(cls, args: str): - config = cls._parse_args(args) - return cls(**config) - - @staticmethod - def _parse_args(args: str) -> dict: - """Parse a string of arguments into a configuration dictionary. - - This function parses a string containing model arguments and generation parameters - into a structured dictionary with two main sections: 'model' and 'generation'. - It specifically handles generation parameters enclosed in curly braces. - - Args: - args (str): A string containing comma-separated key-value pairs, where generation - parameters can be specified in a nested JSON-like format. 
- - Returns: - dict: A dictionary with two keys: - - 'model': Contains general model configuration parameters - - 'generation': Contains generation-specific parameters - - Examples: - >>> parse_args("model_name=gpt2,max_length=100") - { - 'model': {'model_name': 'gpt2', 'max_length': '100'}, - } - - >>> parse_args("model_name=gpt2,generation_parameters={temperature:0.7,top_p:0.9}") - { - 'model': {'model_name': 'gpt2', 'generation_parameters': {'temperature': 0.7, 'top_p': 0.9}, - } - - >>> parse_args("model_name=gpt2,use_cache,generation_parameters={temperature:0.7}") - { - 'model': {'model_name': 'gpt2', 'use_cache': True, 'generation_parameters': {'temperature': 0.7}}, - } - """ - # Looking for generation_parameters in the model_args - generation_parameters_dict = None - pattern = re.compile(r"(\w+)=(\{.*\}|[^,]+)") - matches = pattern.findall(args) - for key, value in matches: - key = key.strip() - if key == "generation_parameters": - gen_params = re.sub(r"(\w+):", r'"\1":', value) - generation_parameters_dict = json.loads(gen_params) - - args = re.sub(r"generation_parameters=\{.*?\},?", "", args).strip(",") - model_config = {k.split("=")[0]: k.split("=")[1] if "=" in k else True for k in args.split(",")} - - if generation_parameters_dict is not None: - model_config["generation_parameters"] = generation_parameters_dict - - return model_config - def _get_dtype(dtype: Union[str, torch.dtype, None], config: Optional[AutoConfig] = None) -> Optional[torch.dtype]: """ diff --git a/src/lighteval/models/vllm/vllm_model.py b/src/lighteval/models/vllm/vllm_model.py index 2bdf07529..6826662c6 100644 --- a/src/lighteval/models/vllm/vllm_model.py +++ b/src/lighteval/models/vllm/vllm_model.py @@ -24,25 +24,26 @@ import itertools import logging import os +from dataclasses import dataclass from typing import Optional import torch -from pydantic import NonNegativeFloat, PositiveInt from tqdm import tqdm from lighteval.data import GenerativeTaskDataset, LoglikelihoodDataset from lighteval.models.abstract_model import LightevalModel, ModelInfo +from lighteval.models.model_input import GenerationParameters from lighteval.models.model_output import ( GenerativeResponse, LoglikelihoodResponse, ) -from lighteval.models.utils import ModelConfig, _simplify_name +from lighteval.models.utils import _simplify_name from lighteval.tasks.requests import ( GreedyUntilRequest, LoglikelihoodRequest, ) from lighteval.utils.imports import is_vllm_available -from lighteval.utils.utils import as_list +from lighteval.utils.utils import EnvConfig, as_list logger = logging.getLogger(__name__) @@ -72,17 +73,18 @@ STARTING_BATCH_SIZE = 512 -class VLLMModelConfig(ModelConfig): - model_name: str +@dataclass +class VLLMModelConfig: + pretrained: str + gpu_memory_utilization: float = 0.9 # lower this if you are running out of memory revision: str = "main" # revision of the model dtype: str = "bfloat16" - tensor_parallel_size: PositiveInt = 1 # how many GPUs to use for tensor parallelism - data_parallel_size: PositiveInt = 1 # how many GPUs to use for data parallelism - pipeline_parallel_size: PositiveInt = 1 # how many GPUs to use for pipeline parallelism - gpu_memory_utilization: NonNegativeFloat = 0.9 # lower this if you are running out of memory - max_model_length: PositiveInt | None = None # maximum length of the model, ussually infered automatically. reduce this if you encouter OOM issues, 4096 is usually enough - swap_space: PositiveInt = 4 # CPU swap space size (GiB) per GPU. 
- seed: PositiveInt = 1234 + tensor_parallel_size: int = 1 # how many GPUs to use for tensor parallelism + pipeline_parallel_size: int = 1 # how many GPUs to use for pipeline parallelism + data_parallel_size: int = 1 # how many GPUs to use for data parallelism + max_model_length: int | None = None # maximum length of the model, usually inferred automatically. Reduce this if you encounter OOM issues, 4096 is usually enough + swap_space: int = 4 # CPU swap space size (GiB) per GPU. + seed: int = 1234 trust_remote_code: bool = False use_chat_template: bool = False add_special_tokens: bool = True @@ -90,34 +92,42 @@ class VLLMModelConfig(ModelConfig): True # whether to add a space at the start of each continuation in multichoice generation ) pairwise_tokenization: bool = False # whether to tokenize the context and continuation separately or together. - max_num_seqs: PositiveInt = 128 # maximum number of sequences per iteration; This variable and `max_num_batched_tokens` effectively control the batch size at prefill stage. See https://github.com/vllm-project/vllm/issues/2492 for detailed explaination. - max_num_batched_tokens: PositiveInt = 2048 # maximum number of tokens per batch - subfolder: str | None = None + generation_parameters: GenerationParameters = None # sampling parameters to use for generation + max_num_seqs: int = 128 # maximum number of sequences per iteration; This variable and `max_num_batched_tokens` effectively control the batch size at prefill stage. See https://github.com/vllm-project/vllm/issues/2492 for a detailed explanation. + max_num_batched_tokens: int = 2048 # maximum number of tokens per batch + + subfolder: Optional[str] = None + + def __post_init__(self): + if not self.generation_parameters: + self.generation_parameters = GenerationParameters() class VLLMModel(LightevalModel): def __init__( self, config: VLLMModelConfig, + env_config: EnvConfig, ): """Initializes a HuggingFace `AutoModel` and `AutoTokenizer` for evaluation.""" self._config = config self.use_chat_template = config.use_chat_template - self.data_parallel_size = config.data_parallel_size - self.tensor_parallel_size = config.tensor_parallel_size + self.data_parallel_size = int(config.data_parallel_size) + self.tensor_parallel_size = int(config.tensor_parallel_size) + self._add_special_tokens = config.add_special_tokens if config.add_special_tokens is not None else False - self._tokenizer = self._create_auto_tokenizer(config) + self._tokenizer = self._create_auto_tokenizer(config, env_config) - self._max_length = config.max_model_length if config.max_model_length is not None else None + self._max_length = int(config.max_model_length) if config.max_model_length is not None else None # If model_parallel is not set we compare the number of processes with the number of GPUs - self.model = self._create_auto_model(config) + self.model = self._create_auto_model(config, env_config) # self._device = config.accelerator.device if config.accelerator is not None else "cpu" self.multichoice_continuations_start_space = config.multichoice_continuations_start_space - self.model_name = _simplify_name(config.model_name) - self.model_sha = "" + self.model_name = _simplify_name(config.pretrained) + self.model_sha = "" # config.get_model_sha() self.precision = config.dtype self.model_info = ModelInfo(model_name=self.model_name, model_sha=self.model_sha) @@ -144,7 +154,7 @@ def add_special_tokens(self): def max_length(self) -> int: return self._max_length - def _create_auto_model(self,
config: VLLMModelConfig) -> Optional[LLM]: + def _create_auto_model(self, config: VLLMModelConfig, env_config: EnvConfig) -> Optional[LLM]: """ Creates an instance of the pretrained HF model. @@ -163,20 +173,20 @@ def _create_auto_model(self, config: VLLMModelConfig) -> Optional[LLM]: transformers.PreTrainedModel: The created auto model instance. """ self.model_args = { - "model": config.model_name, - "gpu_memory_utilization": config.gpu_memory_utilization, + "model": config.pretrained, + "gpu_memory_utilization": float(config.gpu_memory_utilization), "revision": config.revision + (f"/{config.subfolder}" if config.subfolder is not None else ""), "dtype": config.dtype, "trust_remote_code": config.trust_remote_code, - "tensor_parallel_size": config.tensor_parallel_size, - "pipeline_parallel_size": config.pipeline_parallel_size, + "tensor_parallel_size": int(config.tensor_parallel_size), + "pipeline_parallel_size": int(config.pipeline_parallel_size), "max_model_len": self._max_length, "swap_space": 4, - "seed": int(config.seed), + "seed": config.seed, "max_num_seqs": int(config.max_num_seqs), "max_num_batched_tokens": int(config.max_num_batched_tokens), } - if config.data_parallel_size > 1: + if int(config.data_parallel_size) > 1: self.model_args["distributed_executor_backend"] = "ray" self._batch_size = "auto" return None @@ -191,9 +201,9 @@ def _create_auto_model(self, config: VLLMModelConfig) -> Optional[LLM]: return model - def _create_auto_tokenizer(self, config: VLLMModelConfig): + def _create_auto_tokenizer(self, config: VLLMModelConfig, env_config: EnvConfig): tokenizer = get_tokenizer( - config.model_name, + config.pretrained, tokenizer_mode="auto", trust_remote_code=config.trust_remote_code, tokenizer_revision=config.revision, diff --git a/src/lighteval/pipeline.py b/src/lighteval/pipeline.py index d032de8b2..83ab89b68 100644 --- a/src/lighteval/pipeline.py +++ b/src/lighteval/pipeline.py @@ -27,7 +27,7 @@ import re import shutil from contextlib import nullcontext -from dataclasses import dataclass +from dataclasses import asdict, dataclass, field from datetime import timedelta from enum import Enum, auto @@ -62,7 +62,7 @@ is_vllm_available, ) from lighteval.utils.parallelism import test_all_gather -from lighteval.utils.utils import make_results_table +from lighteval.utils.utils import EnvConfig, make_results_table if is_accelerate_available(): @@ -95,16 +95,18 @@ class ParallelismManager(Enum): class PipelineParameters: launcher_type: ParallelismManager # Env parameters + env_config: EnvConfig = field(default_factory=EnvConfig) job_id: int = 0 dataset_loading_processes: int = 1 nanotron_checkpoint_path: str | None = None # only for nanotron models # Dataset custom_tasks_directory: str | None = None + # Generation parameters + override_batch_size: int | None = None num_fewshot_seeds: int = 1 max_samples: int | None = None use_chat_template: bool = False system_prompt: str | None = None - cot_prompt: str | None = None load_responses_from_details_date_id: str | None = None def __post_init__(self): # noqa C901 @@ -154,13 +156,13 @@ def __init__( self.accelerator, self.parallel_context = self._init_parallelism_manager() self.model = self._init_model(model_config, model) - generation_parameters = model_config.generation_parameters.model_dump() if model_config else {} + generation_parameters = asdict(model_config.generation_parameters) if model_config else {} self.evaluation_tracker.general_config_logger.log_model_info(generation_parameters, self.model.model_info) - 
self._init_random_seeds() self._init_tasks_and_requests(tasks=tasks) + self._init_random_seeds() # Final results - self.final_dict: dict | None = None + self.final_dict: dict = None def _init_parallelism_manager(self): accelerator, parallel_context = None, None @@ -194,15 +196,17 @@ def _init_model(self, model_config, model): parallel_context=self.parallel_context, debug_one_layer_model=False, model_class=None, + env_config=self.pipeline_parameters.env_config, ) else: - return load_model(config=model_config) + return load_model(config=model_config, env_config=self.pipeline_parameters.env_config) if isinstance(model, TransformersModel): return model else: return TransformersModel.from_model( model=model, use_chat_template=self.pipeline_parameters.use_chat_template, + env_config=self.pipeline_parameters.env_config, accelerator=self.accelerator, ) @@ -210,6 +214,7 @@ def _init_tasks_and_requests(self, tasks: str): with local_ranks_zero_first() if self.launcher_type == ParallelismManager.NANOTRON else nullcontext(): logger.info("--- LOADING TASKS ---") registry = Registry( + cache_dir=self.pipeline_parameters.env_config.cache_dir, custom_tasks=self.pipeline_parameters.custom_tasks_directory, ) task_names_list, fewshots_dict = taskinfo_selector(tasks, registry) @@ -231,7 +236,6 @@ def _init_tasks_and_requests(self, tasks: str): evaluation_tracker=self.evaluation_tracker, use_chat_template=self.pipeline_parameters.use_chat_template, system_prompt=self.pipeline_parameters.system_prompt, - cot_prompt=self.pipeline_parameters.cot_prompt, ) self.task_names_list = task_names_list @@ -272,6 +276,7 @@ def is_main_process(self): def evaluate(self): self.evaluation_tracker.general_config_logger.log_args_info( num_fewshot_seeds=self.pipeline_parameters.num_fewshot_seeds, + override_batch_size=self.pipeline_parameters.override_batch_size, max_samples=self.pipeline_parameters.max_samples, job_id=self.pipeline_parameters.job_id, config=self.model_config, @@ -464,7 +469,7 @@ def _run_model(self): for request_type, requests in self.requests.items(): logger.info(f"Running {request_type} requests") run_model = self.model.get_method_from_request_type(request_type=request_type) - responses = run_model(requests) + responses = run_model(requests, override_bs=self.pipeline_parameters.override_batch_size) # Storing the responses associated to the same samples together for response, request in zip(responses, requests): diff --git a/src/lighteval/tasks/default_prompts.py b/src/lighteval/tasks/default_prompts.py index 2745b63c5..3745a7724 100644 --- a/src/lighteval/tasks/default_prompts.py +++ b/src/lighteval/tasks/default_prompts.py @@ -43,16 +43,6 @@ # fmt: on -def simpleqa(line, task_name: str = None): - query = line["problem"] - choices = [line["answer"]] - gold_index = 0 - - return Doc( - task_name=task_name, query=query, choices=choices, gold_index=gold_index, specific={**eval(line["metadata"])} - ) - - def aime_prompt_fn(line, task_name: str = None): # Prompt template adapted from # - simple-evals: https://github.com/openai/simple-evals/blob/6e84f4e2aed6b60f6a0c7b8f06bbbf4bfde72e58/math_eval.py#L17 @@ -100,57 +90,6 @@ def apps(line, task_name: str = None): ) -def arc_agi_2(line, task_name: str = None): - # query from: https://github.com/arcprize/model_baseline/blob/main/src/prompts/system_prompt.txt - def convert_2d_list_to_string(list_of_lists: list[list[int]]) -> str: - """ - Convert a list of lists to a string - """ - - string_list = "" - - for row in list_of_lists: - 
string_list += json.dumps(row) + "\n" - - return string_list - - query = """You are participating in a puzzle solving competition. You are an expert at solving puzzles. - -Below is a list of input and output pairs with a pattern. Your goal is to identify the pattern or transformation in the training examples that maps the input to the output, then apply that pattern to the test input to give a final output. - -Respond in the format of the training output examples - ---Training Examples-- -{training_examples} ---End of Training Examples-- - ---Test Input-- -{test_input} ---End of Test Input-- - -Your response:""".strip() - - training_pairs = line["fewshots"] - training_examples = "" - for i, pair in enumerate(training_pairs): - training_examples += f"--Example {i}-- \n\n INPUT: \n\n" - training_examples += convert_2d_list_to_string(pair["input"]) + "\n\n" - training_examples += "OUTPUT: \n\n" - training_examples += convert_2d_list_to_string(pair["output"]) + "\n\n" - - test_input = convert_2d_list_to_string(line["question"][0]["input"]) - - gold = str(line["question"][0]["output"]) - query = query.format(training_examples=training_examples, test_input=test_input) - - return Doc( - task_name=task_name, - query=query, - choices=[gold], - gold_index=0, - ) - - def arc(line, task_name: str = None): return Doc( task_name=task_name, diff --git a/src/lighteval/tasks/default_tasks.py b/src/lighteval/tasks/default_tasks.py index f092092ce..16a83701d 100644 --- a/src/lighteval/tasks/default_tasks.py +++ b/src/lighteval/tasks/default_tasks.py @@ -323,24 +323,7 @@ few_shots_split=None, few_shots_select=None, generation_size=32768, - metric=[ - Metrics.expr_gold_metric, - Metrics.math_pass_at_1_32n, - ], - version=1, -) -aime24_gpassk = LightevalTaskConfig( - name="aime24_gpassk", - suite=["lighteval"], - prompt_function=prompt.aime_prompt_fn, - hf_repo="HuggingFaceH4/aime_2024", - hf_subset="default", - hf_avail_splits=["train"], - evaluation_splits=["train"], - few_shots_split=None, - few_shots_select=None, - generation_size=8192, - metric=[Metrics.g_pass_at_16_expr_gold], + metric=[Metrics.expr_gold_metric], version=1, ) aime25 = LightevalTaskConfig( @@ -354,24 +337,7 @@ few_shots_split=None, few_shots_select=None, generation_size=10000, - metric=[ - Metrics.expr_gold_metric, - Metrics.math_pass_at_1_32n, - ], - version=1, -) -aime25_gpassk = LightevalTaskConfig( - name="aime25_gpassk", - suite=["lighteval"], - prompt_function=prompt.aime_prompt_fn, - hf_repo="yentinglin/aime_2025", - hf_subset="default", - hf_avail_splits=["train"], - evaluation_splits=["train"], - few_shots_split=None, - few_shots_select=None, - generation_size=8192, - metric=[Metrics.g_pass_at_16_expr_gold], + metric=[Metrics.expr_gold_metric], version=1, ) anachronisms_bigbench = LightevalTaskConfig( @@ -433,7 +399,7 @@ few_shots_split="train_r1", few_shots_select="random_sampling_from_train", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -449,7 +415,7 @@ few_shots_split="train_r2", few_shots_select="random_sampling_from_train", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -465,27 +431,11 @@ few_shots_split="train_r3", few_shots_select="random_sampling_from_train", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["\n"], 
trust_dataset=True, version=0, ) -arc_agi_2 = LightevalTaskConfig( - name="arc_agi_2", - suite=["lighteval"], - prompt_function=prompt.arc_agi_2, - hf_repo="arc-agi-community/arc-agi-2", - hf_subset="default", - hf_avail_splits=["train", "test"], - evaluation_splits=["test"], - few_shots_split=None, - few_shots_select=None, - generation_size=2048, - metric=[Metrics.exact_match], - stop_sequence=None, - trust_dataset=False, - version=0, -) arc_c_letters_original = LightevalTaskConfig( name="arc:c:letters", suite=["original", "arc"], @@ -854,7 +804,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["", "Q=", "\n\n"], trust_dataset=True, version=0, @@ -870,7 +820,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["", "Q=", "\n\n"], trust_dataset=True, version=0, @@ -886,7 +836,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["", "Q=", "\n\n"], trust_dataset=True, version=0, @@ -902,7 +852,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["", "Q=", "\n\n"], trust_dataset=True, version=0, @@ -918,7 +868,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["", "Q=", "\n\n"], trust_dataset=True, version=0, @@ -934,7 +884,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["", "Q=", "\n\n"], trust_dataset=True, version=0, @@ -950,7 +900,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["", "Q=", "\n\n"], trust_dataset=True, version=0, @@ -966,7 +916,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["", "Q=", "\n\n"], trust_dataset=True, version=0, @@ -982,7 +932,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["", "Q=", "\n\n"], trust_dataset=True, version=0, @@ -998,7 +948,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["", "Q=", "\n\n"], trust_dataset=True, version=0, @@ -1014,7 +964,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["", "Q=", "\n\n"], trust_dataset=True, version=0, @@ -1030,7 +980,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["", "Q=", "\n\n"], trust_dataset=True, version=0, @@ -1046,7 +996,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc], + 
metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["", "Q=", "\n\n"], trust_dataset=True, version=0, @@ -1062,7 +1012,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["", "Q=", "\n\n"], trust_dataset=True, version=0, @@ -1078,7 +1028,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["", "Q=", "\n\n"], trust_dataset=True, version=0, @@ -1094,7 +1044,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["", "Q=", "\n\n"], trust_dataset=True, version=0, @@ -1110,7 +1060,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["", "Q=", "\n\n"], trust_dataset=True, version=0, @@ -1126,7 +1076,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["", "Q=", "\n\n"], trust_dataset=True, version=0, @@ -7377,7 +7327,7 @@ few_shots_split=None, few_shots_select=None, generation_size=5, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -7393,7 +7343,7 @@ few_shots_split=None, few_shots_select=None, generation_size=5, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -7409,7 +7359,7 @@ few_shots_split=None, few_shots_select=None, generation_size=5, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -7425,7 +7375,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -7441,7 +7391,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -7617,7 +7567,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc, Metrics.mcc_single_token], + metric=[Metrics.loglikelihood_acc_single_token, Metrics.mcc_single_token], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -7681,7 +7631,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -7713,7 +7663,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -7729,7 +7679,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -7745,7 +7695,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + 
metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -7761,7 +7711,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -7793,7 +7743,7 @@ few_shots_split=None, few_shots_select="random_sampling", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -9689,27 +9639,13 @@ metric=[Metrics.latex_gold_metric], version=1, ) -math_500_gpassk = LightevalTaskConfig( - name="math_500_gpassk", - suite=["lighteval"], - prompt_function=prompt.math_500, - hf_repo="HuggingFaceH4/MATH-500", - hf_subset="default", - hf_avail_splits=["test"], - evaluation_splits=["test"], - few_shots_split=None, - few_shots_select=None, - generation_size=8192, - metric=[Metrics.g_pass_at_16_latex_gold], - version=1, -) math_algebra_lighteval = LightevalTaskConfig( name="math:algebra", suite=["lighteval", "math"], prompt_function=prompt.math, - hf_repo="DigitalLearningGmbH/MATH-lighteval", + hf_repo="lighteval/MATH", hf_subset="algebra", - hf_avail_splits=["train", "test"], + hf_avail_splits=["train", "test", "validation"], evaluation_splits=["test"], few_shots_split=None, few_shots_select=None, @@ -9723,9 +9659,9 @@ name="math:counting_and_probability", suite=["lighteval", "math"], prompt_function=prompt.math, - hf_repo="DigitalLearningGmbH/MATH-lighteval", + hf_repo="lighteval/MATH", hf_subset="counting_and_probability", - hf_avail_splits=["train", "test"], + hf_avail_splits=["train", "test", "validation"], evaluation_splits=["test"], few_shots_split=None, few_shots_select=None, @@ -9739,9 +9675,9 @@ name="math:geometry", suite=["lighteval", "math"], prompt_function=prompt.math, - hf_repo="DigitalLearningGmbH/MATH-lighteval", + hf_repo="lighteval/MATH", hf_subset="geometry", - hf_avail_splits=["train", "test"], + hf_avail_splits=["train", "test", "validation"], evaluation_splits=["test"], few_shots_split=None, few_shots_select=None, @@ -9755,9 +9691,9 @@ name="math:intermediate_algebra", suite=["lighteval", "math"], prompt_function=prompt.math, - hf_repo="DigitalLearningGmbH/MATH-lighteval", + hf_repo="lighteval/MATH", hf_subset="intermediate_algebra", - hf_avail_splits=["train", "test"], + hf_avail_splits=["train", "test", "validation"], evaluation_splits=["test"], few_shots_split=None, few_shots_select=None, @@ -9771,9 +9707,9 @@ name="math:number_theory", suite=["lighteval", "math"], prompt_function=prompt.math, - hf_repo="DigitalLearningGmbH/MATH-lighteval", + hf_repo="lighteval/MATH", hf_subset="number_theory", - hf_avail_splits=["train", "test"], + hf_avail_splits=["train", "test", "validation"], evaluation_splits=["test"], few_shots_split=None, few_shots_select=None, @@ -9787,9 +9723,9 @@ name="math:prealgebra", suite=["lighteval", "math"], prompt_function=prompt.math, - hf_repo="DigitalLearningGmbH/MATH-lighteval", + hf_repo="lighteval/MATH", hf_subset="prealgebra", - hf_avail_splits=["train", "test"], + hf_avail_splits=["train", "test", "validation"], evaluation_splits=["test"], few_shots_split=None, few_shots_select=None, @@ -9803,9 +9739,9 @@ name="math:precalculus", suite=["lighteval", "math"], prompt_function=prompt.math, - hf_repo="DigitalLearningGmbH/MATH-lighteval", + hf_repo="lighteval/MATH", hf_subset="precalculus", - hf_avail_splits=["train", "test"], + hf_avail_splits=["train", "test", 
"validation"], evaluation_splits=["test"], few_shots_split=None, few_shots_select=None, @@ -9819,9 +9755,9 @@ name="math_cot:algebra", suite=["lighteval", "math"], prompt_function=prompt.math_cot, - hf_repo="DigitalLearningGmbH/MATH-lighteval", + hf_repo="lighteval/MATH", hf_subset="algebra", - hf_avail_splits=["train", "test"], + hf_avail_splits=["train", "test", "validation"], evaluation_splits=["test"], few_shots_split=None, few_shots_select=None, @@ -9835,9 +9771,9 @@ name="math_cot:counting_and_probability", suite=["lighteval", "math"], prompt_function=prompt.math_cot, - hf_repo="DigitalLearningGmbH/MATH-lighteval", + hf_repo="lighteval/MATH", hf_subset="counting_and_probability", - hf_avail_splits=["train", "test"], + hf_avail_splits=["train", "test", "validation"], evaluation_splits=["test"], few_shots_split=None, few_shots_select=None, @@ -9851,9 +9787,9 @@ name="math_cot:geometry", suite=["lighteval", "math"], prompt_function=prompt.math_cot, - hf_repo="DigitalLearningGmbH/MATH-lighteval", + hf_repo="lighteval/MATH", hf_subset="geometry", - hf_avail_splits=["train", "test"], + hf_avail_splits=["train", "test", "validation"], evaluation_splits=["test"], few_shots_split=None, few_shots_select=None, @@ -9867,9 +9803,9 @@ name="math_cot:intermediate_algebra", suite=["lighteval", "math"], prompt_function=prompt.math_cot, - hf_repo="DigitalLearningGmbH/MATH-lighteval", + hf_repo="lighteval/MATH", hf_subset="intermediate_algebra", - hf_avail_splits=["train", "test"], + hf_avail_splits=["train", "test", "validation"], evaluation_splits=["test"], few_shots_split=None, few_shots_select=None, @@ -9883,9 +9819,9 @@ name="math_cot:number_theory", suite=["lighteval", "math"], prompt_function=prompt.math_cot, - hf_repo="DigitalLearningGmbH/MATH-lighteval", + hf_repo="lighteval/MATH", hf_subset="number_theory", - hf_avail_splits=["train", "test"], + hf_avail_splits=["train", "test", "validation"], evaluation_splits=["test"], few_shots_split=None, few_shots_select=None, @@ -9899,9 +9835,9 @@ name="math_cot:prealgebra", suite=["lighteval", "math"], prompt_function=prompt.math_cot, - hf_repo="DigitalLearningGmbH/MATH-lighteval", + hf_repo="lighteval/MATH", hf_subset="prealgebra", - hf_avail_splits=["train", "test"], + hf_avail_splits=["train", "test", "validation"], evaluation_splits=["test"], few_shots_split=None, few_shots_select=None, @@ -9915,9 +9851,9 @@ name="math_cot:precalculus", suite=["lighteval", "math"], prompt_function=prompt.math_cot, - hf_repo="DigitalLearningGmbH/MATH-lighteval", + hf_repo="lighteval/MATH", hf_subset="precalculus", - hf_avail_splits=["train", "test"], + hf_avail_splits=["train", "test", "validation"], evaluation_splits=["test"], few_shots_split=None, few_shots_select=None, @@ -10350,7 +10286,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -10403,7 +10339,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -10456,7 +10392,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -10509,7 +10445,7 @@ few_shots_split="dev", few_shots_select="sequential", 
generation_size=1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -10562,7 +10498,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -10615,7 +10551,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -10668,7 +10604,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -10721,7 +10657,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -10774,7 +10710,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -10827,7 +10763,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -10880,7 +10816,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -10933,7 +10869,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -10986,7 +10922,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -11039,7 +10975,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -11092,7 +11028,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -11145,7 +11081,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -11198,7 +11134,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -11251,7 +11187,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -11304,7 +11240,7 @@ few_shots_split="dev", 
few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -11357,7 +11293,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -11410,7 +11346,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -11463,7 +11399,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -11516,7 +11452,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -11569,7 +11505,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -11622,7 +11558,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -11675,7 +11611,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -11728,7 +11664,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -11781,7 +11717,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -11834,7 +11770,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -11887,7 +11823,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -11940,7 +11876,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -11993,7 +11929,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -12046,7 +11982,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -12099,7 
+12035,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -12152,7 +12088,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -12205,7 +12141,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -12258,7 +12194,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -12311,7 +12247,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -12364,7 +12300,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -12417,7 +12353,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -12470,7 +12406,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -12523,7 +12459,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -12576,7 +12512,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -12629,7 +12565,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -12682,7 +12618,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -12735,7 +12671,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -12788,7 +12724,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -12841,7 +12777,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["\n"], 
trust_dataset=True, version=0, @@ -12894,7 +12830,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -12947,7 +12883,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -13000,7 +12936,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -13053,7 +12989,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -13106,7 +13042,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -13159,7 +13095,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -13212,7 +13148,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -13265,7 +13201,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -13318,7 +13254,7 @@ few_shots_split="dev", few_shots_select="sequential", generation_size=1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -14181,7 +14117,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -14860,22 +14796,6 @@ trust_dataset=True, version=0, ) -simpleqa = LightevalTaskConfig( - name="simpleqa", - suite=["lighteval"], - prompt_function=prompt.simpleqa, - hf_repo="lighteval/SimpleQA", - hf_subset="default", - hf_avail_splits=["test"], - evaluation_splits=["test"], - few_shots_split=None, - few_shots_select=None, - generation_size=2048, - metric=[Metrics.simpleqa_judge], - stop_sequence=["\n"], - trust_dataset=True, - version=0, -) simple_arithmetic_json_bigbench = LightevalTaskConfig( name="simple_arithmetic_json", suite=["bigbench", "bigbench_json"], @@ -15249,7 +15169,7 @@ few_shots_split=None, few_shots_select=None, generation_size=1, - metric=[Metrics.loglikelihood_acc, "multi_f1_numeric"], + metric=[Metrics.loglikelihood_acc_single_token, "multi_f1_numeric"], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -15281,7 +15201,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -15313,7 +15233,7 @@ few_shots_split=None, 
few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["\n"], trust_dataset=True, version=0, @@ -15329,7 +15249,7 @@ few_shots_split=None, few_shots_select=None, generation_size=-1, - metric=[Metrics.loglikelihood_acc], + metric=[Metrics.loglikelihood_acc_single_token], stop_sequence=["\n"], trust_dataset=True, version=0, diff --git a/src/lighteval/tasks/extended/lcb/main.py b/src/lighteval/tasks/extended/lcb/main.py index 4ebc4d3fe..088f35d09 100644 --- a/src/lighteval/tasks/extended/lcb/main.py +++ b/src/lighteval/tasks/extended/lcb/main.py @@ -34,6 +34,7 @@ import numpy as np from aenum import extend_enum +from datasets import get_dataset_config_names from lighteval.metrics.metrics import MetricCategory, Metrics, MetricUseCase, SampleLevelMetric from lighteval.tasks.extended.lcb.codegen_metrics import ( @@ -48,10 +49,10 @@ def prepare_prompt(line: dict[str, Any]) -> str: query = "You will be given a question (problem specification) and will generate a correct Python program that matches the specification and passes all tests.\n\n" query += f"Question: {line['question_content']}\n\n" if starter_code := line.get("starter_code", None): - query += "You will use the following starter code to write the solution to the problem and enclose your code within delimiters.\n" + query += "You will use the following starter code to write the solution to the problem and enclose your code within delimiters." query += f"```python\n{starter_code}\n```\n\n" else: - query += "Read the inputs from stdin solve the problem and write the answer to stdout (do not directly test on the sample inputs). Enclose your code within delimiters as follows. Ensure that when the python program runs, it reads the inputs, runs the algorithm and writes output to STDOUT.\n" + query += "Read the inputs from stdin solve the problem and write the answer to stdout (do not directly test on the sample inputs). Enclose your code within delimiters as follows." query += "```python\n# YOUR CODE HERE\n```\n\n" return query @@ -114,29 +115,7 @@ def codegen_metric(predictions: list[str], formatted_doc: Doc, **kwargs) -> floa extend_enum(Metrics, "lcb_codegen_metric", lcb_codegen_metric) -configs = [ - "release_v1", - "release_v2", - "release_v3", - "release_v4", - "release_v5", - "release_latest", - "v1", - "v2", - "v3", - "v4", - "v5", - "v1_v2", - "v1_v3", - "v1_v4", - "v1_v5", - "v2_v3", - "v2_v4", - "v2_v5", - "v3_v4", - "v3_v5", - "v4_v5", -] +configs = get_dataset_config_names("livecodebench/code_generation_lite", trust_remote_code=True) tasks = [] diff --git a/src/lighteval/tasks/lighteval_task.py b/src/lighteval/tasks/lighteval_task.py index 2681e779d..e7761a206 100644 --- a/src/lighteval/tasks/lighteval_task.py +++ b/src/lighteval/tasks/lighteval_task.py @@ -584,7 +584,6 @@ def create_requests_from_tasks( # noqa: C901 evaluation_tracker: "EvaluationTracker", use_chat_template: bool, system_prompt: str | None, - cot_prompt: str | None, ) -> Tuple[dict[RequestType, list[Request]], dict[SampleUid, Doc]]: """ Takes a task dict and a fewshot dict and returns a dict of requests, a dict @@ -602,8 +601,6 @@ def create_requests_from_tasks( # noqa: C901 max_samples (int): maximum number of samples. evaluation_tracker (EvaluationTracker): evaluation tracker. use_chat_template (bool): Whether to use the chat template. 
- system_prompt (str): System prompt - cot_prompt (str): Chain of thought prompt Raises: NotImplementedError: If the request type is not implemented for the @@ -651,7 +648,6 @@ def create_requests_from_tasks( # noqa: C901 truncate_few_shots=truncate_few_shots, use_chat_template=use_chat_template, system_prompt=system_prompt, - cot_prompt=cot_prompt, ) # Constructing the requests diff --git a/src/lighteval/tasks/prompt_manager.py b/src/lighteval/tasks/prompt_manager.py index d1dc15d0d..aff4632c6 100644 --- a/src/lighteval/tasks/prompt_manager.py +++ b/src/lighteval/tasks/prompt_manager.py @@ -107,7 +107,6 @@ def add_context_to_doc( truncate_few_shots: bool = False, use_chat_template=False, system_prompt: str = None, - cot_prompt: str = None, ) -> Doc: is_multi_turn = doc.specific is not None and len(doc.specific.get("multi_turn_queries", [])) > 0 if is_multi_turn: @@ -122,7 +121,6 @@ def add_context_to_doc( sampler=sampler, use_chat_template=use_chat_template, system_prompt=system_prompt, - cot_prompt=cot_prompt, ) doc.num_effective_few_shots = num_effective_few_shots doc.num_asked_few_shots = num_fewshot @@ -177,7 +175,6 @@ def _single_turn_context( truncate_few_shots: bool = False, use_chat_template=False, system_prompt: str = None, - cot_prompt: str = None, ): """Returns a fewshot context string that is made up of a prepended description (if provided), the `num_fewshot` number of examples, and an appended prompt example. @@ -209,7 +206,6 @@ def _single_turn_context( fewshot_ex=fewshot_ex, system_prompt=system_prompt, use_chat_template=use_chat_template, - cot_prompt=cot_prompt, ) if not use_chat_template: toks = self.model.tok_encode(output) @@ -232,7 +228,6 @@ def _single_turn_context( fewshot_ex=fewshot_ex[:num_effective_fewshots], system_prompt=system_prompt, use_chat_template=use_chat_template, - cot_prompt=cot_prompt, ) if not use_chat_template: toks = self.model.tok_encode(output) @@ -257,7 +252,6 @@ def get_examples( fewshot_ex: list[str], system_prompt: Union[str | None], use_chat_template: bool, - cot_prompt: Union[str | None], ): examples = [] # Few shot examples @@ -269,12 +263,10 @@ def get_examples( examples.append(self.doc_to_text(ex, return_instructions=False) + self.doc_to_target(ex)) # Actual example - content = example + cot_prompt if cot_prompt is not None else example - if use_chat_template: - examples.append({"role": "user", "content": content}) + examples.append({"role": "user", "content": example}) else: - examples.append(content) + examples.append(example) # System prompt and instruction if use_chat_template: diff --git a/src/lighteval/tasks/registry.py b/src/lighteval/tasks/registry.py index 4fb8d9230..174a98d33 100644 --- a/src/lighteval/tasks/registry.py +++ b/src/lighteval/tasks/registry.py @@ -56,7 +56,6 @@ "extended", "custom", "community", - "test", ] TRUNCATE_FEW_SHOTS_DEFAULTS = True diff --git a/src/lighteval/tasks/templates/utils/translation_literals.py b/src/lighteval/tasks/templates/utils/translation_literals.py index 0e5c8592d..df29d4912 100644 --- a/src/lighteval/tasks/templates/utils/translation_literals.py +++ b/src/lighteval/tasks/templates/utils/translation_literals.py @@ -1030,29 +1030,7 @@ def __getattribute__(self, name: str) -> str: colon=":", semicolon="؛", ), - Language.UZBEK: TranslationLiterals( - language=Language.UZBEK, - question_word="savol", - answer="javob", - confirmation_word="to'g'ri", - yes="ha", - no="yo'q", - also="shuningdek", - or_word="yoki", - and_word="va", - cause_word="chunki", - effect_word="shuning uchun", - 
true="rost", - false="yolg'on", - neither="hech biri", - full_stop=".", - comma=",", - question_mark="?", - exclamation_mark="!", - word_space=" ", - sentence_space=" ", - colon=":", - ), + Language.UZBEK: TranslationLiterals(language=Language.UZBEK), Language.VIETNAMESE: TranslationLiterals( language=Language.VIETNAMESE, question_word="câu hỏi", diff --git a/src/lighteval/utils/utils.py b/src/lighteval/utils/utils.py index c4f2956ea..2c0ea487e 100644 --- a/src/lighteval/utils/utils.py +++ b/src/lighteval/utils/utils.py @@ -11,7 +11,8 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from dataclasses import asdict, is_dataclass +import os +from dataclasses import asdict, dataclass, is_dataclass from typing import Callable, TypeVar, Union import numpy as np @@ -181,6 +182,20 @@ def make_results_table(result_dict): return md_writer.dumps() +@dataclass +class EnvConfig: + """ + Configuration class for environment settings. + + Attributes: + cache_dir (str): directory for caching data. + token (str): authentication token used for accessing the HuggingFace Hub. + """ + + cache_dir: str = os.getenv("HF_HUB_CACHE", "/scratch") + token: str = os.getenv("HF_TOKEN") + + def boolstring_to_bool(x: Union[str, bool, int]) -> Union[bool, None]: """Allows to manage string or bool to bool conversion, in case a configuration input is badly formatted. diff --git a/tests/conftest.py b/tests/conftest.py deleted file mode 100644 index a568cc130..000000000 --- a/tests/conftest.py +++ /dev/null @@ -1,23 +0,0 @@ -# MIT License - -# Copyright (c) 2024 The HuggingFace Team - -import pytest - - -def pytest_addoption(parser): - parser.addoption("--runslow", action="store_true", default=False, help="run slow tests") - - -def pytest_configure(config): - config.addinivalue_line("markers", "slow: mark test as slow to run") - - -def pytest_collection_modifyitems(config, items): - if config.getoption("--runslow"): - # --runslow given in cli: do not skip slow tests - return - skip_slow = pytest.mark.skip(reason="need --runslow option to run") - for item in items: - if "slow" in item.keywords: - item.add_marker(skip_slow) diff --git a/tests/models/endpoints/test_endpoint_model.py b/tests/models/endpoints/test_endpoint_model.py index d2291d17e..f4ba15d91 100644 --- a/tests/models/endpoints/test_endpoint_model.py +++ b/tests/models/endpoints/test_endpoint_model.py @@ -21,11 +21,13 @@ # SOFTWARE. 
import pytest -import yaml from lighteval.models.endpoints.endpoint_model import InferenceEndpointModelConfig +# "examples/model_configs/endpoint_model.yaml" + + class TestInferenceEndpointModelConfig: @pytest.mark.parametrize( "config_path, expected_config", @@ -34,8 +36,8 @@ class TestInferenceEndpointModelConfig: "examples/model_configs/endpoint_model.yaml", { "model_name": "meta-llama/Llama-2-7b-hf", - "dtype": "float16", "revision": "main", + "model_dtype": "float16", "endpoint_name": None, "reuse_existing": False, "accelerator": "gpu", @@ -48,31 +50,36 @@ class TestInferenceEndpointModelConfig: "namespace": None, "image_url": None, "env_vars": None, - "add_special_tokens": True, - "generation_parameters": { - "early_stopping": None, - "frequency_penalty": None, - "length_penalty": None, - "max_new_tokens": 256, - "min_new_tokens": None, - "min_p": None, - "presence_penalty": None, - "repetition_penalty": None, - "seed": None, - "stop_tokens": None, - "temperature": 0.2, - "top_k": None, - "top_p": 0.9, - "truncate_prompt": None, - "response_format": None, - }, }, ), + ( + "examples/model_configs/serverless_model.yaml", + { + "model_name": "meta-llama/Llama-3.1-8B-Instruct", + # Defaults: + "revision": "main", + "model_dtype": None, + "endpoint_name": None, + "reuse_existing": False, + "accelerator": "gpu", + "region": "us-east-1", + "vendor": "aws", + "instance_type": None, + "instance_size": None, + "framework": "pytorch", + "endpoint_type": "protected", + "namespace": None, + "image_url": None, + "env_vars": None, + }, + ), + ( + "examples/model_configs/endpoint_model_reuse_existing.yaml", + {"endpoint_name": "llama-2-7B-lighteval", "reuse_existing": True}, + ), ], ) def test_from_path(self, config_path, expected_config): - with open(config_path, "r") as f: - config = yaml.safe_load(f) - config = InferenceEndpointModelConfig.from_path(config_path) - assert config.model_dump() == expected_config + for key, value in expected_config.items(): + assert getattr(config, key) == value diff --git a/tests/models/endpoints/test_tgi_model.py b/tests/models/endpoints/test_tgi_model.py index 872dc06ce..2ffb520c1 100644 --- a/tests/models/endpoints/test_tgi_model.py +++ b/tests/models/endpoints/test_tgi_model.py @@ -20,6 +20,7 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
+from dataclasses import asdict import pytest @@ -35,7 +36,7 @@ class TestTGIModelConfig: { "inference_server_address": "", "inference_server_auth": None, - "model_name": None, + "model_id": None, "generation_parameters": { "early_stopping": None, "frequency_penalty": None, @@ -59,4 +60,4 @@ class TestTGIModelConfig: ) def test_from_path(self, config_path, expected_config): config = TGIModelConfig.from_path(config_path) - assert config.model_dump() == expected_config + assert asdict(config) == expected_config diff --git a/tests/models/test_abstract_model.py b/tests/models/test_abstract_model.py index 4066f3259..e7fc0172e 100644 --- a/tests/models/test_abstract_model.py +++ b/tests/models/test_abstract_model.py @@ -23,10 +23,11 @@ from transformers import AutoTokenizer from lighteval.models.dummy.dummy_model import DummyModel, DummyModelConfig +from lighteval.utils.utils import EnvConfig def test_tok_encode_pair(): - model = DummyModel(config=DummyModelConfig(seed=42)) + model = DummyModel(config=DummyModelConfig(seed=42), env_config=EnvConfig()) model._tokenizer = AutoTokenizer.from_pretrained("facebook/xglm-564M") context = "答案:" continuation = "1" diff --git a/tests/models/test_base_model.py b/tests/models/test_base_model.py index 5593db077..d8cc02a4a 100644 --- a/tests/models/test_base_model.py +++ b/tests/models/test_base_model.py @@ -22,15 +22,17 @@ from lighteval.models.model_loader import load_model from lighteval.models.transformers.transformers_model import TransformersModel, TransformersModelConfig +from lighteval.utils.utils import EnvConfig def test_empty_requests(): model_config = TransformersModelConfig( - model_name="hf-internal-testing/tiny-random-LlamaForCausalLM", model_parallel=False, revision="main" + "hf-internal-testing/tiny-random-LlamaForCausalLM", model_parallel=False, revision="main" ) - model: TransformersModel = load_model(config=model_config) + model: TransformersModel = load_model(config=model_config, env_config=EnvConfig(cache_dir=".")) assert model.loglikelihood([]) == [] assert model.loglikelihood_single_token([]) == [] assert model.loglikelihood_rolling([]) == [] assert model.greedy_until([]) == [] + assert model.greedy_until_multi_turn([]) == [] diff --git a/tests/reference_scores/SmolLM2-1.7B-Instruct-results-accelerate.json b/tests/reference_scores/SmolLM2-1.7B-Instruct-results-accelerate.json deleted file mode 100644 index 38e249bdb..000000000 --- a/tests/reference_scores/SmolLM2-1.7B-Instruct-results-accelerate.json +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:5a3b8efc2168b8307c729fd3d505c7b681148ab720d4d04d711754f4e47f0669 -size 49156 diff --git a/tests/reference_scores/SmolLM2-1.7B-Instruct-results-vllm.json b/tests/reference_scores/SmolLM2-1.7B-Instruct-results-vllm.json deleted file mode 100644 index ffb7159f5..000000000 --- a/tests/reference_scores/SmolLM2-1.7B-Instruct-results-vllm.json +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:ba484d14fa25e113618787e6f496f7b905fa19bc60d23a390959afc89094d315 -size 49053 diff --git a/tests/reference_scores/reference_task_scores.py b/tests/reference_scores/reference_task_scores.py new file mode 100644 index 000000000..b4e550a62 --- /dev/null +++ b/tests/reference_scores/reference_task_scores.py @@ -0,0 +1,639 @@ +# MIT License + +# Copyright (c) 2024 The HuggingFace Team + +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal 
+# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +"""Results on the full suite.""" +RESULTS_FULL = { + "gpt2-xl": { + "lighteval|anli:r1|0|0": {"acc": 0.337, "acc_stderr": 0.014955087918653605}, + "lighteval|blimp:adjunct_island|0|0": {"acc": 0.893, "acc_stderr": 0.009779910359847165}, + "lighteval|blimp:ellipsis_n_bar_1|0|0": {"acc": 0.909, "acc_stderr": 0.009099549538400246}, + "leaderboard|arc:challenge|25|0": { + "acc": 0.257679180887372, + "acc_stderr": 0.0127807705627684, + "acc_norm": 0.302901023890785, + "acc_norm_stderr": 0.013428241573185347, + }, + "leaderboard|hellaswag|10|0": { + "acc": 0.3981278629755029, + "acc_stderr": 0.004885116465550274, + "acc_norm": 0.5139414459271061, + "acc_norm_stderr": 0.004987841367402517, + }, + "leaderboard|mmlu:abstract_algebra|5|0": { + "acc": 0.26, + "acc_stderr": 0.04408440022768081, + }, + "leaderboard|mmlu:college_chemistry|5|0": { + "acc": 0.24, + "acc_stderr": 0.04292346959909284, + }, + "leaderboard|mmlu:computer_security|5|0": { + "acc": 0.29, + "acc_stderr": 0.04560480215720684, + }, + "leaderboard|mmlu:us_foreign_policy|5|0": { + "acc": 0.22, + "acc_stderr": 0.041633319989322695, + }, + "leaderboard|truthfulqa:mc|0|0": { + "truthfulqa_mc1": 0.22031823745410037, + "truthfulqa_mc1_stderr": 0.0145090451714873, + "truthfulqa_mc2": 0.3853407807086726, + "truthfulqa_mc2_stderr": 0.014058180381569934, + }, + "helm|mmlu:abstract_algebra|5|0": { + "em": 0.26, + "em_stderr": 0.04408440022768081, + "pqem": 0.48, + "pqem_stderr": 0.05021167315686779, + }, + "helm|mmlu:college_chemistry|5|0": { + "em": 0.22, + "em_stderr": 0.041633319989322695, + "pqem": 0.33, + "pqem_stderr": 0.04725815626252604, + }, + "helm|mmlu:computer_security|5|0": { + "em": 0.22, + "em_stderr": 0.04163331998932269, + "pqem": 0.41, + "pqem_stderr": 0.04943110704237101, + }, + "helm|mmlu:us_foreign_policy|5|0": { + "em": 0.2, + "em_stderr": 0.04020151261036846, + "pqem": 0.48, + "pqem_stderr": 0.050211673156867795, + }, + "helm|boolq|5|0": { + "em": 0.5963302752293578, + "em_stderr": 0.008581220435616823, + "qem": 0.5966360856269113, + "qem_stderr": 0.008580168554889729, + "pem": 0.6048929663608563, + "pem_stderr": 0.008550454248280891, + "pqem": 0.6051987767584098, + "pqem_stderr": 0.008549304887647408, + }, + "leaderboard|gsm8k|5|0": {"qem": 0.009097801364670205, "qem_stderr": 0.002615326510775673}, + # "gsm8k": {"acc": 0.009097801364670205, "acc_stderr": 0.002615326510775673}, Actual harness results + }, + "gpt2": { + "lighteval|anli:r1|0|0": {"acc": 0.341, "acc_stderr": 0.014998131348402704}, + "lighteval|blimp:adjunct_island|0|0": {"acc": 0.913, "acc_stderr": 0.00891686663074591}, + "lighteval|blimp:ellipsis_n_bar_1|0|0": 
{"acc": 0.842, "acc_stderr": 0.011539894677559568}, + "leaderboard|arc:challenge|25|0": { + "acc": 0.20051194539249148, + "acc_stderr": 0.011700318050499373, + "acc_norm": 0.21928327645051193, + "acc_norm_stderr": 0.012091245787615723, + }, + "leaderboard|hellaswag|10|0": { + "acc": 0.29267078271260705, + "acc_stderr": 0.004540586983229992, + "acc_norm": 0.3157737502489544, + "acc_norm_stderr": 0.0046387332023738815, + }, + "leaderboard|mmlu:abstract_algebra|5|0": { + "acc": 0.21, + "acc_stderr": 0.040936018074033256, + }, + "leaderboard|mmlu:college_chemistry|5|0": { + "acc": 0.2, + "acc_stderr": 0.04020151261036846, + }, + "leaderboard|mmlu:computer_security|5|0": { + "acc": 0.16, + "acc_stderr": 0.03684529491774709, + }, + "leaderboard|mmlu:us_foreign_policy|5|0": { + "acc": 0.27, + "acc_stderr": 0.04461960433384739, + }, + "leaderboard|truthfulqa:mc|0|0": { + "truthfulqa_mc1": 0.22766217870257038, + "truthfulqa_mc1_stderr": 0.01467925503211107, + "truthfulqa_mc2": 0.40693581786045147, + "truthfulqa_mc2_stderr": 0.014921948720110469, + }, + "helm|mmlu:abstract_algebra|5|0": { + "em": 0.21, + "em_stderr": 0.040936018074033256, + "pqem": 0.37, + "pqem_stderr": 0.048523658709391, + }, + "helm|mmlu:college_chemistry|5|0": { + "em": 0.25, + "em_stderr": 0.04351941398892446, + "pqem": 0.3, + "pqem_stderr": 0.046056618647183814, + }, + "helm|mmlu:computer_security|5|0": { + "em": 0.14, + "em_stderr": 0.03487350880197769, + "pqem": 0.41, + "pqem_stderr": 0.04943110704237102, + }, + "helm|mmlu:us_foreign_policy|5|0": { + "em": 0.27, + "em_stderr": 0.044619604333847394, + "pqem": 0.51, + "pqem_stderr": 0.05024183937956911, + }, + "helm|boolq|5|0": { + "em": 0.5406727828746177, + "em_stderr": 0.00871607349717106, + "qem": 0.5406727828746177, + "qem_stderr": 0.00871607349717106, + "pem": 0.5406727828746177, + "pem_stderr": 0.00871607349717106, + "pqem": 0.5406727828746177, + "pqem_stderr": 0.00871607349717106, + }, + "leaderboard|gsm8k|5|0": {"qem": 0.006065200909780136, "qem_stderr": 0.0021386703014604626}, + # "harness|gsm8k|5|0": {"acc": 0.004548900682335102, "acc_stderr": 0.0018535550440036204}, Actual harness results + "harness|bigbench:causal_judgment|3|0": { + "acc": 0.4842, + "acc_stderr": 0.0364, + "acc_norm": 0.4947, + "acc_norm_stderr": 0.0364, + }, + "harness|bigbench:date_understanding|3|0": { + "acc": 0.2764, + "acc_stderr": 0.0233, + "acc_norm": 0.2764, + "acc_norm_stderr": 0.0233, + }, + "harness|bigbench:disambiguation_qa|3|0": { + "acc": 0.3372, + "acc_stderr": 0.0295, + "acc_norm": 0.3450, + "acc_norm_stderr": 0.0297, + }, + "harness|bigbench:geometric_shapes|3|0": { + "acc": 0.1058, + "acc_stderr": 0.0163, + "acc_norm": 0.1476, + "acc_norm_stderr": 0.0187, + }, + "harness|bigbench:logical_deduction_five_objects|3|0": { + "acc": 0.2080, + "acc_stderr": 0.0182, + "acc_norm": 0.2120, + "acc_norm_stderr": 0.0183, + }, + "harness|bigbench:logical_deduction_seven_objects|3|0": { + "acc": 0.1743, + "acc_stderr": 0.0143, + "acc_norm": 0.1743, + "acc_norm_stderr": 0.0143, + }, + "harness|bigbench:logical_deduction_three_objects|3|0": { + "acc": 0.3033, + "acc_stderr": 0.0266, + "acc_norm": 0.3167, + "acc_norm_stderr": 0.0269, + }, + "harness|bigbench:movie_recommendation|3|0": { + "acc": 0.3900, + "acc_stderr": 0.0218, + "acc_norm": 0.3460, + "acc_norm_stderr": 0.0213, + }, + "harness|bigbench:navigate|3|0": { + "acc": 0.4990, + "acc_stderr": 0.0158, + "acc_norm": 0.5000, + "acc_norm_stderr": 0.0158, + }, + "harness|bigbench:reasoning_about_colored_objects|3|0": { + "acc": 0.1665, + 
"acc_stderr": 0.0083, + "acc_norm": 0.1535, + "acc_norm_stderr": 0.0081, + }, + "harness|bigbench:ruin_names|3|0": { + "acc": 0.3393, + "acc_stderr": 0.0224, + "acc_norm": 0.3237, + "acc_norm_stderr": 0.0221, + }, + "harness|bigbench:salient_translation_error_detection|3|0": { + "acc": 0.1834, + "acc_stderr": 0.0123, + "acc_norm": 0.1834, + "acc_norm_stderr": 0.0123, + }, + "harness|bigbench:snarks|3|0": { + "acc": 0.5359, + "acc_stderr": 0.0372, + "acc_norm": 0.5359, + "acc_norm_stderr": 0.0372, + }, + "harness|bigbench:sports_understanding|3|0": { + "acc": 0.5010, + "acc_stderr": 0.0159, + "acc_norm": 0.5020, + "acc_norm_stderr": 0.0159, + }, + "harness|bigbench:temporal_sequences|3|0": { + "acc": 0.2700, + "acc_stderr": 0.0140, + "acc_norm": 0.2710, + "acc_norm_stderr": 0.0141, + }, + "harness|bigbench:tracking_shuffled_objects_five_objects|3|0": { + "acc": 0.1928, + "acc_stderr": 0.0112, + "acc_norm": 0.1976, + "acc_norm_stderr": 0.0113, + }, + "harness|bigbench:tracking_shuffled_objects_seven_objects|3|0": { + "acc": 0.1463, + "acc_stderr": 0.0085, + "acc_norm": 0.1383, + "acc_norm_stderr": 0.0083, + }, + "harness|bigbench:tracking_shuffled_objects_three_objects|3|0": { + "acc": 0.3033, + "acc_stderr": 0.0266, + "acc_norm": 0.3167, + "acc_norm_stderr": 0.0269, + }, + "lighteval|bigbench:causal_judgment|3|0": {"acc": 0.5158, "acc_stderr": 0.0364}, + "lighteval|bigbench:date_understanding|3|0": {"acc": 0.0000, "acc_stderr": 0.0000}, + "lighteval|bigbench:disambiguation_qa|3|0": {"acc": 0.2984, "acc_stderr": 0.0285}, + "lighteval|bigbench:geometric_shapes|3|0": {"acc": 0.0972, "acc_stderr": 0.0156}, + "lighteval|bigbench:logical_deduction_five_objects|3|0": {"acc": 0.2000, "acc_stderr": 0.0179}, + "lighteval|bigbench:logical_deduction_seven_objects|3|0": {"acc": 0.1429, "acc_stderr": 0.0132}, + "lighteval|bigbench:logical_deduction_three_objects|3|0": {"acc": 0.3333, "acc_stderr": 0.0273}, + "lighteval|bigbench:movie_recommendation|3|0": {"acc": 0.2540, "acc_stderr": 0.0195}, + "lighteval|bigbench:navigate|3|0": {"acc": 0.4990, "acc_stderr": 0.0158}, + "lighteval|bigbench:reasoning_about_colored_objects|3|0": {"acc": 0.1560, "acc_stderr": 0.0081}, + "lighteval|bigbench:ruin_names|3|0": {"acc": 0.2411, "acc_stderr": 0.0202}, + "lighteval|bigbench:salient_translation_error_detection|3|0": {"acc": 0.1673, "acc_stderr": 0.0118}, + "lighteval|bigbench:snarks|3|0": {"acc": 0.4696, "acc_stderr": 0.0372}, + "lighteval|bigbench:sports_understanding|3|0": {"acc": 0.4990, "acc_stderr": 0.0158}, + "lighteval|bigbench:temporal_sequences|3|0": {"acc": 1.0000, "acc_stderr": 0.0000}, + "lighteval|bigbench:tracking_shuffled_objects_five_objects|3|0": {"acc": 0.1976, "acc_stderr": 0.0113}, + "lighteval|bigbench:tracking_shuffled_objects_seven_objects|3|0": {"acc": 0.1406, "acc_stderr": 0.0083}, + "lighteval|bigbench:tracking_shuffled_objects_three_objects|3|0": {"acc": 0.3333, "acc_stderr": 0.0273}, + }, +} + +"""Results on 10 samples, using no parallelism""" +RESULTS_LITE = { + "gpt2-xl": { + "lighteval|anli:r1|0|0": {"acc": 0.4, "acc_stderr": 0.16329931618554522}, + "lighteval|blimp:adjunct_island|0|0": {"acc": 0.9, "acc_stderr": 0.09999999999999999}, + "lighteval|blimp:ellipsis_n_bar_1|0|0": {"acc": 0.8, "acc_stderr": 0.13333333333333333}, + "leaderboard|arc:challenge|25|0": { + "acc": 0.2, + "acc_stderr": 0.13333333333333333, + "acc_norm": 0.1, + "acc_norm_stderr": 0.09999999999999999, + }, + "leaderboard|hellaswag|10|0": { + "acc": 0.4, + "acc_stderr": 0.16329931618554522, + "acc_norm": 0.8, + 
"acc_norm_stderr": 0.13333333333333333, + }, + "leaderboard|mmlu:abstract_algebra|5|0": { + "acc": 0.3, + "acc_stderr": 0.15275252316519466, + }, + "leaderboard|mmlu:college_chemistry|5|0": { + "acc": 0.2, + "acc_stderr": 0.13333333333333333, + }, + "leaderboard|mmlu:computer_security|5|0": { + "acc": 0.4, + "acc_stderr": 0.1632993161855452, + }, + "leaderboard|mmlu:us_foreign_policy|5|0": { + "acc": 0.3, + "acc_stderr": 0.15275252316519466, + }, + "leaderboard|truthfulqa:mc|0|0": { + "truthfulqa_mc1": 0.3, + "truthfulqa_mc1_stderr": 0.15275252316519466, + "truthfulqa_mc2": 0.4528717362471066, + "truthfulqa_mc2_stderr": 0.14740763841220644, + }, + "helm|mmlu:abstract_algebra|5|0": { + "em": 0.3, + "em_stderr": 0.15275252316519466, + "pqem": 0.4, + "pqem_stderr": 0.16329931618554522, + }, + "helm|mmlu:college_chemistry|5|0": { + "em": 0.1, + "em_stderr": 0.09999999999999999, + "pqem": 0.2, + "pqem_stderr": 0.13333333333333333, + }, + "helm|mmlu:computer_security|5|0": { + "em": 0.1, + "em_stderr": 0.09999999999999999, + "pqem": 0.3, + "pqem_stderr": 0.15275252316519464, + }, + "helm|mmlu:us_foreign_policy|5|0": { + "em": 0.2, + "em_stderr": 0.13333333333333333, + "pqem": 0.5, + "pqem_stderr": 0.16666666666666666, + }, + "helm|boolq|5|0": { + "em": 0.6, + "em_stderr": 0.16329931618554522, + "qem": 0.6, + "qem_stderr": 0.16329931618554522, + "pem": 0.6, + "pem_stderr": 0.16329931618554522, + "pqem": 0.6, + "pqem_stderr": 0.16329931618554522, + }, + "leaderboard|gsm8k|5|0": {"qem": 0.1, "qem_stderr": 0.09999999999999999}, + }, + "gpt2": { + "lighteval|anli:r1|0|0": {"acc": 0.5, "acc_stderr": 0.16666666666666666}, + "lighteval|blimp:adjunct_island|0|0": {"acc": 0.8, "acc_stderr": 0.13333333333333333}, + "lighteval|blimp:ellipsis_n_bar_1|0|0": {"acc": 0.7, "acc_stderr": 0.15275252316519466}, + "leaderboard|arc:challenge|25|0": { + "acc": 0.3, + "acc_stderr": 0.15275252316519466, + "acc_norm": 0.0, + "acc_norm_stderr": 0.0, + }, + "leaderboard|hellaswag|10|0": { + "acc": 0.4, + "acc_stderr": 0.16329931618554522, + "acc_norm": 0.6, + "acc_norm_stderr": 0.16329931618554522, + }, + "leaderboard|mmlu:abstract_algebra|5|0": { + "acc": 0.4, + "acc_stderr": 0.16329931618554522, + }, + "leaderboard|mmlu:college_chemistry|5|0": { + "acc": 0.1, + "acc_stderr": 0.09999999999999999, + }, + "leaderboard|mmlu:computer_security|5|0": { + "acc": 0.1, + "acc_stderr": 0.09999999999999999, + }, + "leaderboard|mmlu:us_foreign_policy|5|0": { + "acc": 0.3, + "acc_stderr": 0.15275252316519466, + }, + "leaderboard|truthfulqa:mc|0|0": { + "truthfulqa_mc1": 0.3, + "truthfulqa_mc1_stderr": 0.15275252316519466, + "truthfulqa_mc2": 0.4175889390166028, + "truthfulqa_mc2_stderr": 0.14105533101540416, + }, + "helm|mmlu:abstract_algebra|5|0": { + "em": 0.4, + "em_stderr": 0.16329931618554522, + "pqem": 0.4, + "pqem_stderr": 0.16329931618554522, + }, + "helm|mmlu:college_chemistry|5|0": { + "em": 0.3, + "em_stderr": 0.15275252316519466, + "pqem": 0.4, + "pqem_stderr": 0.16329931618554522, + }, + "helm|mmlu:computer_security|5|0": { + "em": 0.1, + "em_stderr": 0.09999999999999999, + "pqem": 0.3, + "pqem_stderr": 0.15275252316519464, + }, + "helm|mmlu:us_foreign_policy|5|0": { + "em": 0.3, + "em_stderr": 0.15275252316519466, + "pqem": 0.6, + "pqem_stderr": 0.16329931618554522, + }, + "helm|boolq|5|0": { + "em": 0.4, + "em_stderr": 0.16329931618554522, + "qem": 0.4, + "qem_stderr": 0.16329931618554522, + "pem": 0.4, + "pem_stderr": 0.16329931618554522, + "pqem": 0.4, + "pqem_stderr": 0.16329931618554522, + }, + 
"leaderboard|gsm8k|5|0": {"qem": 0.0, "qem_stderr": 0.0}, + "harness|bigbench:causal_judgment|3|0": { + "acc": 0.6000, + "acc_stderr": 0.1633, + "acc_norm": 0.5000, + "acc_norm_stderr": 0.16666666666666666, + }, + "harness|bigbench:date_understanding|3|0": { + "acc": 0.2000, + "acc_stderr": 0.13333333333333333, + "acc_norm": 0.2000, + "acc_norm_stderr": 0.13333333333333333, + }, + "harness|bigbench:disambiguation_qa|3|0": { + "acc": 0.7000, + "acc_stderr": 0.15275252316519466, + "acc_norm": 0.3000, + "acc_norm_stderr": 0.15275252316519464, + }, + "harness|bigbench:geometric_shapes|3|0": { + "acc": 0.0000, + "acc_stderr": 0.0000, + "acc_norm": 0.2000, + "acc_norm_stderr": 0.13333333333333333, + }, + "harness|bigbench:logical_deduction_five_objects|3|0": { + "acc": 0.3000, + "acc_stderr": 0.15275252316519464, + "acc_norm": 0.3000, + "acc_norm_stderr": 0.15275252316519464, + }, + "harness|bigbench:logical_deduction_seven_objects|3|0": { + "acc": 0.2000, + "acc_stderr": 0.13333333333333333, + "acc_norm": 0.2000, + "acc_norm_stderr": 0.13333333333333333, + }, + "harness|bigbench:logical_deduction_three_objects|3|0": { + "acc": 0.2000, + "acc_stderr": 0.13333333333333333, + "acc_norm": 0.3000, + "acc_norm_stderr": 0.15275252316519466, + }, + "harness|bigbench:movie_recommendation|3|0": { + "acc": 0.5000, + "acc_stderr": 0.16666666666666666, + "acc_norm": 0.3000, + "acc_norm_stderr": 0.15275252316519464, + }, + "harness|bigbench:navigate|3|0": { + "acc": 0.6000, + "acc_stderr": 0.1633, + "acc_norm": 0.6000, + "acc_norm_stderr": 0.1633, + }, + "harness|bigbench:reasoning_about_colored_objects|3|0": { + "acc": 0.2000, + "acc_stderr": 0.13333333333333333, + "acc_norm": 0.1000, + "acc_norm_stderr": 0.1000, + }, + "harness|bigbench:ruin_names|3|0": { + "acc": 0.2000, + "acc_stderr": 0.13333333333333333, + "acc_norm": 0.2000, + "acc_norm_stderr": 0.13333333333333333, + }, + "harness|bigbench:salient_translation_error_detection|3|0": { + "acc": 0.1000, + "acc_stderr": 0.1000, + "acc_norm": 0.1000, + "acc_norm_stderr": 0.1000, + }, + "harness|bigbench:snarks|3|0": { + "acc": 0.4000, + "acc_stderr": 0.1633, + "acc_norm": 0.4000, + "acc_norm_stderr": 0.1633, + }, + "harness|bigbench:sports_understanding|3|0": { + "acc": 0.6000, + "acc_stderr": 0.1633, + "acc_norm": 0.6000, + "acc_norm_stderr": 0.1633, + }, + "harness|bigbench:temporal_sequences|3|0": { + "acc": 0.1000, + "acc_stderr": 0.1000, + "acc_norm": 0.1000, + "acc_norm_stderr": 0.1000, + }, + "harness|bigbench:tracking_shuffled_objects_five_objects|3|0": { + "acc": 0.2000, + "acc_stderr": 0.13333333333333333, + "acc_norm": 0.1000, + "acc_norm_stderr": 0.1000, + }, + "harness|bigbench:tracking_shuffled_objects_seven_objects|3|0": { + "acc": 0.1000, + "acc_stderr": 0.1000, + "acc_norm": 0.0000, + "acc_norm_stderr": 0.0000, + }, + "harness|bigbench:tracking_shuffled_objects_three_objects|3|0": { + "acc": 0.2000, + "acc_stderr": 0.13333333333333333, + "acc_norm": 0.3000, + "acc_norm_stderr": 0.15275252316519466, + }, + "lighteval|bigbench:causal_judgment|3|0": {"acc": 0.5000, "acc_stderr": 0.16666666666666666}, + "lighteval|bigbench:date_understanding|3|0": {"acc": 0.0000, "acc_stderr": 0.0000}, + "lighteval|bigbench:disambiguation_qa|3|0": {"acc": 0.7000, "acc_stderr": 0.15275252316519466}, + "lighteval|bigbench:geometric_shapes|3|0": {"acc": 0.2000, "acc_stderr": 0.13333333333333333}, + "lighteval|bigbench:logical_deduction_five_objects|3|0": {"acc": 0.1000, "acc_stderr": 0.1000}, + "lighteval|bigbench:logical_deduction_seven_objects|3|0": {"acc": 
0.2000, "acc_stderr": 0.13333333333333333}, + "lighteval|bigbench:logical_deduction_three_objects|3|0": {"acc": 0.4000, "acc_stderr": 0.1633}, + "lighteval|bigbench:movie_recommendation|3|0": {"acc": 0.3000, "acc_stderr": 0.15275252316519466}, + "lighteval|bigbench:navigate|3|0": {"acc": 0.4000, "acc_stderr": 0.1633}, + "lighteval|bigbench:reasoning_about_colored_objects|3|0": {"acc": 0.2000, "acc_stderr": 0.13333333333333333}, + "lighteval|bigbench:ruin_names|3|0": {"acc": 0.3000, "acc_stderr": 0.15275252316519464}, + "lighteval|bigbench:salient_translation_error_detection|3|0": {"acc": 0.4000, "acc_stderr": 0.1633}, + "lighteval|bigbench:snarks|3|0": {"acc": 0.6000, "acc_stderr": 0.1633}, + "lighteval|bigbench:sports_understanding|3|0": {"acc": 0.6000, "acc_stderr": 0.1633}, + "lighteval|bigbench:temporal_sequences|3|0": {"acc": 1.0000, "acc_stderr": 0.0000}, + "lighteval|bigbench:tracking_shuffled_objects_five_objects|3|0": { + "acc": 0.2000, + "acc_stderr": 0.13333333333333333, + }, + "lighteval|bigbench:tracking_shuffled_objects_seven_objects|3|0": { + "acc": 0.3000, + "acc_stderr": 0.15275252316519464, + }, + "lighteval|bigbench:tracking_shuffled_objects_three_objects|3|0": {"acc": 0.4000, "acc_stderr": 0.1633}, + "lighteval|agieval:_average|0|0": { + "acc": 0.2125, + "acc_stderr": 0.1323, + "acc_norm": 0.2250, + "acc_norm_stderr": 0.1347, + }, + "lighteval|agieval:aqua-rat|0|0": { + "acc": 0.3000, + "acc_stderr": 0.15275, + "acc_norm": 0.3000, + "acc_norm_stderr": 0.15275, + }, + "lighteval|agieval:logiqa-en|0|0": { + "acc": 0.1000, + "acc_stderr": 0.1000, + "acc_norm": 0.3000, + "acc_norm_stderr": 0.15275, + }, + "lighteval|agieval:lsat-ar|0|0": { + "acc": 0.1000, + "acc_stderr": 0.1000, + "acc_norm": 0.1000, + "acc_norm_stderr": 0.1000, + }, + "lighteval|agieval:lsat-lr|0|0": { + "acc": 0.2000, + "acc_stderr": 0.13333, + "acc_norm": 0.2000, + "acc_norm_stderr": 0.13333, + }, + "lighteval|agieval:lsat-rc|0|0": { + "acc": 0.3000, + "acc_stderr": 0.15275, + "acc_norm": 0.2000, + "acc_norm_stderr": 0.13333, + }, + "lighteval|agieval:sat-en-without-passage|0|0": { + "acc": 0.2000, + "acc_stderr": 0.13333, + "acc_norm": 0.3000, + "acc_norm_stderr": 0.15275, + }, + "lighteval|agieval:sat-en|0|0": { + "acc": 0.2000, + "acc_stderr": 0.13333, + "acc_norm": 0.3000, + "acc_norm_stderr": 0.15275, + }, + "lighteval|agieval:sat-math|0|0": { + "acc": 0.3000, + "acc_stderr": 0.15275, + "acc_norm": 0.1000, + "acc_norm_stderr": 0.1000, + }, + }, +} diff --git a/tests/reference_scores/reference_tasks.py b/tests/reference_scores/reference_tasks.py new file mode 100644 index 000000000..7a41809db --- /dev/null +++ b/tests/reference_scores/reference_tasks.py @@ -0,0 +1,114 @@ +# MIT License + +# Copyright (c) 2024 The HuggingFace Team + +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. 
+ +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +# todo: add original once we are sure of the results +MMLU_SUBSET = [ + "leaderboard|mmlu:abstract_algebra|5|0", + "helm|mmlu:abstract_algebra|5|0", + # "original|mmlu:abstract_algebra|5", + "leaderboard|mmlu:college_chemistry|5|0", + "helm|mmlu:college_chemistry|5|0", + # "original|mmlu:college_chemistry|5", + "leaderboard|mmlu:computer_security|5|0", + "helm|mmlu:computer_security|5|0", + # "original|mmlu:computer_security|5", + "leaderboard|mmlu:us_foreign_policy|5|0", + "helm|mmlu:us_foreign_policy|5|0", + # "original|mmlu:us_foreign_policy|5", +] + +LEADERBOARD_SUBSET = [ + "leaderboard|arc:challenge|25|0", + "leaderboard|truthfulqa:mc|0|0", + "leaderboard|hellaswag|10|0", + "leaderboard|mmlu:abstract_algebra|5|0", + "leaderboard|mmlu:college_chemistry|5|0", + "leaderboard|mmlu:computer_security|5|0", + "leaderboard|mmlu:us_foreign_policy|5|0", + "leaderboard|gsm8k|5|0", +] + +STABLE_SUBSET = [ + "helm|mmlu:abstract_algebra|5|0", + "helm|mmlu:college_chemistry|5|0", + "helm|mmlu:computer_security|5|0", + "helm|mmlu:us_foreign_policy|5|0", + "lighteval|anli:r1|0|0", + "lighteval|blimp:adjunct_island|0|0", + "lighteval|blimp:ellipsis_n_bar_1|0|0", +] + +HELM_SUBSET = [ + "helm|boolq|5|0", +] + +AGIEVAL_SUBSET = [ + "lighteval|agieval:aqua-rat|0|0", + "lighteval|agieval:logiqa-en|0|0", + "lighteval|agieval:lsat-ar|0|0", + "lighteval|agieval:lsat-lr|0|0", + "lighteval|agieval:lsat-rc|0|0", + "lighteval|agieval:sat-en-without-passage|0|0", + "lighteval|agieval:sat-en|0|0", + "lighteval|agieval:sat-math|0|0", +] + +BBH_SUBSET = [ + "lighteval|bigbench:causal_judgment|3|0", + "harness|bigbench:causal_judgment|3|0", + "lighteval|bigbench:date_understanding|3|0", + "harness|bigbench:date_understanding|3|0", + "lighteval|bigbench:disambiguation_qa|3|0", + "harness|bigbench:disambiguation_qa|3|0", + "lighteval|bigbench:geometric_shapes|3|0", + "harness|bigbench:geometric_shapes|3|0", + "lighteval|bigbench:logical_deduction_five_objects|3|0", + "harness|bigbench:logical_deduction_five_objects|3|0", + "lighteval|bigbench:logical_deduction_seven_objects|3|0", + "harness|bigbench:logical_deduction_seven_objects|3|0", + "lighteval|bigbench:logical_deduction_three_objects|3|0", + "harness|bigbench:logical_deduction_three_objects|3|0", + "lighteval|bigbench:movie_recommendation|3|0", + "harness|bigbench:movie_recommendation|3|0", + "lighteval|bigbench:navigate|3|0", + "harness|bigbench:navigate|3|0", + "lighteval|bigbench:reasoning_about_colored_objects|3|0", + "harness|bigbench:reasoning_about_colored_objects|3|0", + "lighteval|bigbench:ruin_names|3|0", + "harness|bigbench:ruin_names|3|0", + "lighteval|bigbench:salient_translation_error_detection|3|0", + "harness|bigbench:salient_translation_error_detection|3|0", + "lighteval|bigbench:snarks|3|0", + "harness|bigbench:snarks|3|0", + "lighteval|bigbench:sports_understanding|3|0", + "harness|bigbench:sports_understanding|3|0", + "lighteval|bigbench:temporal_sequences|3|0", + "harness|bigbench:temporal_sequences|3|0", + 
"lighteval|bigbench:tracking_shuffled_objects_five_objects|3|0", + "harness|bigbench:tracking_shuffled_objects_five_objects|3|0", + "lighteval|bigbench:tracking_shuffled_objects_seven_objects|3|0", + "harness|bigbench:tracking_shuffled_objects_seven_objects|3|0", + "lighteval|bigbench:tracking_shuffled_objects_three_objects|3|0", + "harness|bigbench:tracking_shuffled_objects_three_objects|3|0", +] + +ALL_SUBSETS = LEADERBOARD_SUBSET + STABLE_SUBSET + HELM_SUBSET + AGIEVAL_SUBSET + BBH_SUBSET diff --git a/tests/slow_tests/__init__.py b/tests/slow_tests/__init__.py deleted file mode 100644 index a732db8d0..000000000 --- a/tests/slow_tests/__init__.py +++ /dev/null @@ -1,21 +0,0 @@ -# MIT License - -# Copyright (c) 2024 The HuggingFace Team - -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all -# copies or substantial portions of the Software. - -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. diff --git a/tests/slow_tests/test_accelerate_model.py b/tests/slow_tests/test_accelerate_model.py deleted file mode 100644 index 064288633..000000000 --- a/tests/slow_tests/test_accelerate_model.py +++ /dev/null @@ -1,105 +0,0 @@ -# MIT License - -# Copyright (c) 2024 The HuggingFace Team - -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all -# copies or substantial portions of the Software. - -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. 
- -import json -import os -from functools import lru_cache, partial -from typing import Callable, Tuple - -import pytest -from deepdiff import DeepDiff - -from lighteval.main_accelerate import accelerate # noqa: E402 - - -# Set env var for deterministic run of models -os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":4096:8" - -MODELS_ARGS = [ - # {"model_name": "gpt2", "use_chat_template": False, "revision": "main", "results_file": "tests/reference_scores/gpt2-results.json"}, - { - "model_name": "examples/model_configs/transformers_model.yaml", - "use_chat_template": True, - "results_file": "tests/reference_scores/SmolLM2-1.7B-Instruct-results-accelerate.json", - } -] -TASKS_PATH = "examples/test_tasks.txt" -CUSTOM_TASKS_PATH = "examples/custom_tasks_tests.py" - -ModelInput = Tuple[str, Callable[[], dict]] - - -@lru_cache(maxsize=len(MODELS_ARGS)) -def run_model(model_name: str, use_chat_template: bool): - """Runs the full main as a black box, using the input model and tasks, on 10 samples without parallelism""" - results = accelerate( - model_args=model_name, - tasks=TASKS_PATH, - use_chat_template=use_chat_template, - output_dir="", - dataset_loading_processes=8, - save_details=False, - max_samples=10, - custom_tasks=CUSTOM_TASKS_PATH, - ) - return results - - -def generate_tests() -> list[ModelInput]: - """Generate test parameters for all models and tasks.""" - - tests = [] - for model_args in MODELS_ARGS: - predictions_lite = partial(run_model, model_args["model_name"], model_args["use_chat_template"]) - tests.append((model_args, predictions_lite)) - - return tests - - -# generates the model predictions parameters at test collection time -tests: list[ModelInput] = generate_tests() -ids = [f"{model_input[0]['model_name']}" for model_input in tests] - - -@pytest.mark.slow -@pytest.mark.parametrize("tests", tests, ids=ids) -def test_accelerate_model_prediction(tests: list[ModelInput]): - """Evaluates a model on a full task - is parametrized using pytest_generate_test""" - model_args, get_predictions = tests - - # Load the reference results - with open(model_args["results_file"], "r") as f: - reference_results = json.load(f)["results"] - - # Change the key names, replace '|' with ':' - reference_results = {k.replace("|", ":"): v for k, v in reference_results.items()} - - # Get the predictions - predictions = get_predictions()["results"] - - # Convert defaultdict values to regular dict for comparison - predictions_dict = {k: dict(v) if hasattr(v, "default_factory") else v for k, v in predictions.items()} - - # Compare the predictions with the reference results - diff = DeepDiff(reference_results, predictions_dict, ignore_numeric_type_changes=True, math_epsilon=0.05) - - assert diff == {}, f"Differences found: {diff}" diff --git a/tests/slow_tests/test_vllm_model.py b/tests/slow_tests/test_vllm_model.py deleted file mode 100644 index e7d6d5381..000000000 --- a/tests/slow_tests/test_vllm_model.py +++ /dev/null @@ -1,103 +0,0 @@ -# MIT License - -# Copyright (c) 2024 The HuggingFace Team - -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be 
included in all -# copies or substantial portions of the Software. - -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. - -import json -import os -from functools import lru_cache, partial -from typing import Callable, Tuple - -import pytest -from deepdiff import DeepDiff - -from lighteval.main_vllm import vllm # noqa: E402 - - -# Set env var for deterministic run of models -os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":4096:8" - -MODELS_ARGS = [ - # {"model_name": "gpt2", "use_chat_template": False, "revision": "main", "results_file": "tests/reference_scores/gpt2-results.json"}, - { - "model_name": "examples/model_configs/vllm_model_config.yaml", - "use_chat_template": True, - "results_file": "tests/reference_scores/SmolLM2-1.7B-Instruct-results-vllm.json", - } -] -TASKS_PATH = "examples/test_tasks.txt" -CUSTOM_TASKS_PATH = "examples/custom_tasks_tests.py" - -ModelInput = Tuple[str, Callable[[], dict]] - - -@lru_cache(maxsize=len(MODELS_ARGS)) -def run_model(model_name: str, use_chat_template: bool): - """Runs the full main as a black box, using the input model and tasks, on 10 samples without parallelism""" - results = vllm( - model_args=model_name, - tasks=TASKS_PATH, - use_chat_template=use_chat_template, - output_dir="", - dataset_loading_processes=8, - save_details=False, - max_samples=10, - custom_tasks=CUSTOM_TASKS_PATH, - ) - return results - - -def generate_tests() -> list[ModelInput]: - """Generate test parameters for all models and tasks.""" - - tests = [] - for model_args in MODELS_ARGS: - predictions_lite = partial(run_model, model_args["model_name"], model_args["use_chat_template"]) - tests.append((model_args, predictions_lite)) - - return tests - - -# generates the model predictions parameters at test collection time -tests: list[ModelInput] = generate_tests() -ids = [f"{model_input[0]['model_name']}" for model_input in tests] - - -@pytest.mark.slow -@pytest.mark.parametrize("tests", tests, ids=ids) -def test_vllm_model(tests: list[ModelInput]): - """Evaluates a model on a full task - is parametrized using pytest_generate_test""" - model_args, get_predictions = tests - - predictions = get_predictions()["results"] - - # Load the reference results - with open(model_args["results_file"], "r") as f: - reference_results = json.load(f)["results"] - - # Change the key names, replace '|' with ':' - reference_results = {k.replace("|", ":"): v for k, v in reference_results.items()} - - # Convert defaultdict values to regular dict for comparison - predictions_dict = {k: dict(v) if hasattr(v, "default_factory") else v for k, v in predictions.items()} - - diff = DeepDiff(reference_results, predictions_dict, ignore_numeric_type_changes=True, math_epsilon=0.05) - - assert diff == {}, f"Differences found: {diff}" diff --git a/tests/test_main.py b/tests/test_main.py new file mode 100644 index 000000000..d0c85b33c --- /dev/null +++ b/tests/test_main.py @@ -0,0 +1,126 @@ +# MIT License + +# Copyright (c) 2024 The HuggingFace Team + +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the 
"Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +"""This file should be launched using `python -m pytest script_name.py`. It must stay at the same level or above as main""" +import os +from functools import lru_cache, partial +from typing import Callable, List, Literal, Tuple + +import pytest +from pytest import approx + +from lighteval.main_accelerate import accelerate # noqa: E402 +from tests.reference_scores.reference_task_scores import RESULTS_FULL, RESULTS_LITE # noqa: E402 +from tests.reference_scores.reference_tasks import ALL_SUBSETS + + +# Set env var for deterministic run of models +os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":4096:8" + +# To add new models or tasks, change here +# ! The correct results must be present in reference_task_scores +MODELS = ["gpt2"] +TASKS = ALL_SUBSETS +FULL_TEST = os.environ.get("LIGHTEVAL_FULL_TEST", False) +ModelInput = Tuple[str, str, str, str, Callable[[], dict], float] + + +# Caching here to avoid re-running predictions for every single test, the size should be >= MODELS +@lru_cache(maxsize=len(MODELS)) +def run_model_predictions_full(model: str, tasks: tuple): + """Runs the full main as a black box, using the input model and tasks, on all samples without parallelism""" + results = accelerate( + model_args=f"pretrained={model}", + tasks=",".join(tasks), + override_batch_size=1, + output_dir="", + dataset_loading_processes=1, + save_details=True, + ) + return results + + +@lru_cache(maxsize=len(MODELS)) +def run_model_predictions_lite(model: str, tasks: tuple): + """Runs the full main as a black box, using the input model and tasks, on 10 samples without parallelism""" + results = accelerate( + model_args=f"pretrained={model}", + tasks=",".join(tasks), + override_batch_size=1, + output_dir="", + dataset_loading_processes=1, + save_details=True, + max_samples=10, + ) + return results + + +def generate_test_parameters(tasks: List[str]) -> List[ModelInput]: + """Generate test parameters for all models and tasks.""" + + def generate_model_parameters( + model: str, test_type: Literal["full", "lite"], prediction_func: Callable + ) -> List[ModelInput]: + results = RESULTS_FULL if test_type == "full" else RESULTS_LITE + return [ + (model, test_type, normalize_eval_name(eval_name), metric, prediction_func, reference) + for eval_name in tasks + for metric, reference in results[model][eval_name].items() + ] + + parameters = [] + for model in MODELS: + if FULL_TEST: + # Don't call the function during collection!! 
Very expensive + predictions_full = partial(run_model_predictions_full, model, tuple(tasks)) + parameters.extend(generate_model_parameters(model, "full", predictions_full)) + else: + predictions_lite = partial(run_model_predictions_lite, model, tuple(tasks)) + parameters.extend(generate_model_parameters(model, "lite", predictions_lite)) + + return parameters + + +def normalize_eval_name(eval_name: str) -> str: + """Normalize evaluation name by removing the last part if it has 4 components.""" + parts = eval_name.split("|") + return "|".join(parts[:3]) if len(parts) == 4 else eval_name + + +# generates the model predictions parameters at test collection time +parameters: list[ModelInput] = generate_test_parameters(TASKS) +ids = [f"{model_input[0]}_{model_input[1]}_{model_input[2]}_{model_input[3]}" for model_input in parameters] + + +@pytest.mark.parametrize("model_input", parameters, ids=ids) +def test_model_prediction(model_input: ModelInput): + """Evaluates a model on a full task - is parametrized using pytest_generate_test""" + model_name, test_type, eval_name, metric, get_predictions, reference = model_input + prediction = get_predictions()["results"][eval_name.replace("|", ":")][metric] + assert reference == approx( + prediction, rel=1e-4 + ), f"Model {model_name} on {test_type} samples, for eval {eval_name}, metric {metric} incorrect" + + +if __name__ == "__main__": + parameters = generate_test_parameters(TASKS) + print(parameters) From 3e3bb58aebc27d143c8115f106c15e568b654e0b Mon Sep 17 00:00:00 2001 From: nouamanetazi Date: Mon, 28 Apr 2025 18:04:29 +0000 Subject: [PATCH 11/13] fixes --- .../models/nanotron/nanotron_model.py | 80 ++++++++++--------- 1 file changed, 42 insertions(+), 38 deletions(-) diff --git a/src/lighteval/models/nanotron/nanotron_model.py b/src/lighteval/models/nanotron/nanotron_model.py index 724c66ec9..98d429118 100644 --- a/src/lighteval/models/nanotron/nanotron_model.py +++ b/src/lighteval/models/nanotron/nanotron_model.py @@ -56,7 +56,7 @@ ) from lighteval.utils.imports import is_nanotron_available from lighteval.utils.parallelism import find_executable_batch_size -from lighteval.utils.utils import EnvConfig, as_list +from lighteval.utils.utils import as_list logger = logging.getLogger(__name__) @@ -96,12 +96,12 @@ def __init__( parallel_context: "ParallelContext", max_gen_toks: Optional[int] = 256, max_length: Optional[int] = None, - add_special_tokens: Optional[bool] = True, + add_special_tokens: Optional[bool] = False, dtype: Optional[Union[str, torch.dtype]] = None, trust_remote_code: bool = False, debug_one_layer_model: bool = False, model_class: Optional[Type] = None, - env_config: EnvConfig = None, + env_config: "EnvConfig" = None, ): """Initializes a nanotron model for evaluation. Args: @@ -230,7 +230,7 @@ def _create_auto_tokenizer( *, pretrained: str, tokenizer: Optional[str] = None, - env_config: EnvConfig = None, + env_config: "EnvConfig" = None, trust_remote_code: bool = False, ) -> transformers.PreTrainedTokenizer: """Returns a pre-trained tokenizer from a pre-trained tokenizer configuration.""" @@ -446,6 +446,7 @@ def loglikelihood(self, requests: List[LoglikelihoodRequest], override_bs=None) """Tokenize the context and continuation and compute the log likelihood of those tokenized sequences. 
""" + # requests = requests[:10256] for request in tqdm( requests, desc="Tokenizing", disable=bool(dist.get_rank(self.parallel_context.world_pg) != 0) ): @@ -495,10 +496,8 @@ def prepare_batch( We truncate to keep only at most `max_context` tokens We pad to `padding_length` tokens """ - # if not full_attention_masks: - # raise ValueError( - # "Only full attention masks are supported for now - fix Flash Attention 2 support for more" - # ) + assert full_attention_masks == False, "full_attention_masks=True means we would be doing attention of padding tokens, which would affect negatively the results." + assert pad_on_left == False, "pad_on_left=True not supported yet, see TODOs below" current_pp_rank = dist.get_rank(self.parallel_context.pp_pg) if current_pp_rank != self.input_pp_rank: @@ -514,25 +513,26 @@ def prepare_batch( if max_context is None: max_context = self.max_length - if max_context % self.parallel_config.tp != 0: - # We need to round up to the next multiple of self.parallel_config.tp - if (max_context + (self.parallel_config.tp - max_context % self.parallel_config.tp)) < self.max_length: - # We can add some tokens - max_context = max_context + (self.parallel_config.tp - max_context % self.parallel_config.tp) - else: - # We need to remove some tokens - max_context = max_context - (max_context % self.parallel_config.tp) - - if padding_length % self.parallel_config.tp != 0: - # We need to round up to the next multiple of self.parallel_config.tp - if ( - padding_length + (self.parallel_config.tp - padding_length % self.parallel_config.tp) - ) < self.max_length: - # We can add some tokens - padding_length = padding_length + (self.parallel_config.tp - padding_length % self.parallel_config.tp) - else: - # We need to remove some tokens - padding_length = padding_length - (padding_length % self.parallel_config.tp) + assert self.parallel_config.tp_mode == TensorParallelLinearMode.ALL_REDUCE, "No reason to have tp_mode==REDUCE_SCATTER when doing inference" + # if max_context % self.parallel_config.tp != 0: + # # We need to round up to the next multiple of self.parallel_config.tp + # if (max_context + (self.parallel_config.tp - max_context % self.parallel_config.tp)) < self.max_length: + # # We can add some tokens + # max_context = max_context + (self.parallel_config.tp - max_context % self.parallel_config.tp) + # else: + # # We need to remove some tokens + # max_context = max_context - (max_context % self.parallel_config.tp) + + # if padding_length % self.parallel_config.tp != 0: + # # We need to round up to the next multiple of self.parallel_config.tp + # if ( + # padding_length + (self.parallel_config.tp - padding_length % self.parallel_config.tp) + # ) < self.max_length: + # # We can add some tokens + # padding_length = padding_length + (self.parallel_config.tp - padding_length % self.parallel_config.tp) + # else: + # # We need to remove some tokens + # padding_length = padding_length - (padding_length % self.parallel_config.tp) # because vectorizing is annoying, we first convert each (context, continuation) pair to padded # tensors, then we pack them together into a batch, call the model, and then pick it all apart @@ -574,7 +574,7 @@ def prepare_batch( if pad_on_left: inp = torch.cat( [ - torch.zeros(padding_length - inplen, dtype=torch.long), # [padding_length - seq] + torch.zeros(padding_length - inplen, dtype=torch.long), # [padding_length - seq] #TODO: padding_token not always 0 inp, # [seq] ], dim=0, @@ -583,7 +583,7 @@ def prepare_batch( inp = torch.cat( [ inp, # [seq] - 
torch.zeros(padding_length - inplen, dtype=torch.long), # [padding_length - seq] + torch.zeros(padding_length - inplen, dtype=torch.long), # [padding_length - seq] #TODO: padding_token not always 0 ], dim=0, ) @@ -713,10 +713,9 @@ def _loglikelihood_single_token( inputs = [item.tokenized_context for item in batch_data] batch_model = self.prepare_batch( - inputs, padding_length=max_context, max_context=max_context, full_attention_masks=True + inputs, padding_length=max_context, max_context=max_context, full_attention_masks=False ) # batched_inputs, batch_attention, input_lengths, truncated, padded - position_ids = ( torch.arange(batch_model.input_ids.shape[1], device=self.device, dtype=torch.int32) .unsqueeze(0) @@ -949,16 +948,21 @@ def _loglikelihood_tokens( inputs = [ item.tokenized_context + item.tokenized_continuation[:-1] for item in batch_data ] # The last token doesn't need to be input in the model + + pad_on_left = False # if we unpad in modeling, it doesn't matter if left or right # TODO: not supported yet batch_model = self.prepare_batch( - inputs, padding_length=max_context, max_context=max_context, full_attention_masks=True + inputs, padding_length=max_context, max_context=max_context, full_attention_masks=False, pad_on_left=pad_on_left ) # batched_inputs, batch_attention, input_lengths, truncated, padded with torch.no_grad(): - position_ids = ( - torch.arange(batch_model.input_ids.shape[1], device=self.device, dtype=torch.int32) - .unsqueeze(0) - .repeat(batch_model.input_ids.shape[0], 1) - ) + # Create sequential position_ids initialized to -1 (for padding) + position_ids = torch.full(batch_model.input_ids.shape, fill_value=-1, dtype=torch.long) + + for i, length in enumerate(batch_model.input_lengths): + if pad_on_left: + position_ids[i, -length:] = torch.arange(length, device=self.device, dtype=torch.int32) + else: + position_ids[i, :length] = torch.arange(length, device=self.device, dtype=torch.int32) out = self.model(input_ids=batch_model.input_ids, position_ids=position_ids) if dist.get_rank(self.parallel_context.pp_pg) == self.output_pp_rank: @@ -997,7 +1001,7 @@ def _loglikelihood_tokens( cur_logits = ( cur_logits[inplen - contlen : inplen].unsqueeze(0).to(self.device) - ) # [1, seq, voc] + ) # [1, seq, voc] #TODO: this assumes padding on the right cont_toks = cont_toks.unsqueeze(0).to(self.device) # [1, seq] # Check if per-token argmax is exactly equal to continuation From a12afc6bcc281b1bf8ac89336817deb59a1abe3c Mon Sep 17 00:00:00 2001 From: Hynek Kydlicek Date: Thu, 8 May 2025 13:46:43 +0000 Subject: [PATCH 12/13] Only use relevant parts of nanotron config / make qa evals cheaper for probs --- src/lighteval/config/lighteval_config.py | 7 ++++--- src/lighteval/main_nanotron.py | 21 +++++++++++-------- .../models/nanotron/nanotron_model.py | 7 ++++--- src/lighteval/tasks/default_prompts.py | 2 ++ src/lighteval/tasks/templates/qa.py | 5 ++++- 5 files changed, 26 insertions(+), 16 deletions(-) diff --git a/src/lighteval/config/lighteval_config.py b/src/lighteval/config/lighteval_config.py index 100ab5431..3fe6f0028 100644 --- a/src/lighteval/config/lighteval_config.py +++ b/src/lighteval/config/lighteval_config.py @@ -27,11 +27,10 @@ if is_nanotron_available(): - from nanotron.config import Config + from nanotron.config import ModelArgs, TokenizerArgs, GeneralArgs from nanotron.config.parallelism_config import ParallelismArgs from nanotron.generation.sampler import SamplerType from nanotron.logging import get_logger - logger = get_logger(__name__) 
DEFAULT_GENERATION_SEED = 42 @@ -100,7 +99,9 @@ class LightEvalConfig: @dataclass class FullNanotronConfig: lighteval_config: LightEvalConfig - nanotron_config: "Config" + nanotron_model: "ModelArgs" + nanotron_tokenizer: "TokenizerArgs" + nanotron_general: "GeneralArgs" @property def generation_parameters(self): diff --git a/src/lighteval/main_nanotron.py b/src/lighteval/main_nanotron.py index 1597475de..5609c751a 100644 --- a/src/lighteval/main_nanotron.py +++ b/src/lighteval/main_nanotron.py @@ -25,6 +25,8 @@ from typer import Option from typing_extensions import Annotated +import yaml +from yaml import SafeLoader CACHE_DIR: str = os.getenv("HF_HOME", "/scratch") @@ -48,7 +50,7 @@ def nanotron( """ Evaluate models using nanotron as backend. """ - from nanotron.config import Config, get_config_from_file + from nanotron.config import get_config_from_dict, get_config_from_file, ModelArgs, TokenizerArgs, GeneralArgs from lighteval.config.lighteval_config import FullNanotronConfig, LightEvalConfig from lighteval.logging.evaluation_tracker import EvaluationTracker @@ -65,18 +67,19 @@ def nanotron( if not checkpoint_config_path.endswith(".yaml"): raise ValueError("The checkpoint path should point to a YAML file") - model_config = get_config_from_file( - checkpoint_config_path, - config_class=Config, - model_config_class=None, + with open(checkpoint_config_path) as f: + nanotron_yaml = yaml.load(f, Loader=SafeLoader) + + model_config, tokenizer_config, general_config = [get_config_from_dict( + nanotron_yaml[key], + config_class=config_class, skip_unused_config_keys=True, skip_null_keys=True, - ) + ) for key, config_class in [("model", ModelArgs), ("tokenizer", TokenizerArgs), ("general", GeneralArgs)]] # Load lighteval config lighteval_config: LightEvalConfig = get_config_from_file(lighteval_config_path, config_class=LightEvalConfig) # type: ignore - - nanotron_config = FullNanotronConfig(lighteval_config, model_config) + nanotron_config = FullNanotronConfig(lighteval_config, model_config, tokenizer_config, general_config) evaluation_tracker = EvaluationTracker( output_dir=lighteval_config.logging.output_dir, @@ -86,7 +89,7 @@ def nanotron( push_to_tensorboard=lighteval_config.logging.push_to_tensorboard, save_details=lighteval_config.logging.save_details, tensorboard_metric_prefix=lighteval_config.logging.tensorboard_metric_prefix, - nanotron_run_info=nanotron_config.nanotron_config.general, + nanotron_run_info=nanotron_config.nanotron_general, ) pipeline_parameters = PipelineParameters( diff --git a/src/lighteval/models/nanotron/nanotron_model.py b/src/lighteval/models/nanotron/nanotron_model.py index 98d429118..07c572dbd 100644 --- a/src/lighteval/models/nanotron/nanotron_model.py +++ b/src/lighteval/models/nanotron/nanotron_model.py @@ -106,8 +106,8 @@ def __init__( """Initializes a nanotron model for evaluation. 
Args: """ - model_args = nanotron_config.nanotron_config.model - tokenizer = nanotron_config.nanotron_config.tokenizer + model_args = nanotron_config.nanotron_model + tokenizer = nanotron_config.nanotron_tokenizer lighteval_config = nanotron_config.lighteval_config parallel_config = nanotron_config.lighteval_config.parallelism @@ -218,7 +218,7 @@ def __init__( self.pairwise_tokenization = nanotron_config.lighteval_config.tasks.pairwise_tokenization self.model_info = ModelInfo( - model_name=f"{nanotron_config.nanotron_config.general.run}/{nanotron_config.nanotron_config.general.step}" + model_name=f"{nanotron_config.nanotron_general.run}/{nanotron_config.nanotron_general.step}" ) @property @@ -1264,6 +1264,7 @@ def greedy_until( max_new_tokens=max_new_tokens, max_micro_batch_size=batch_size, # ok for PP=1 for PP>1 we'll need to split the batch returns_logits=returns_logits, + tokenizer=self.tokenizer, generation_config=self.generation_config, ) dist.barrier() # Got everyone to send their stuff diff --git a/src/lighteval/tasks/default_prompts.py b/src/lighteval/tasks/default_prompts.py index 3745a7724..cd92468b7 100644 --- a/src/lighteval/tasks/default_prompts.py +++ b/src/lighteval/tasks/default_prompts.py @@ -2713,3 +2713,5 @@ def xsum(line, task_name: str = None): choices=[str(line["summary"])], specific={"text": line["article"]}, ) + + diff --git a/src/lighteval/tasks/templates/qa.py b/src/lighteval/tasks/templates/qa.py index 2ae8d7c8d..e798f820d 100644 --- a/src/lighteval/tasks/templates/qa.py +++ b/src/lighteval/tasks/templates/qa.py @@ -70,9 +70,12 @@ def adapter_for_mcq(line: dict) -> MCQInput | None: if input_data is None: return None + choices = list(set(input_data["choices"])) + return { **input_data, - "gold_idx": list(range(len(input_data["choices"]))), + "gold_idx": list(range(len(choices))), + "choices": choices, } multichoice_prompt_fn = get_mcq_prompt_function(language, adapter=adapter_for_mcq, formulation=CFFormulation()) From c2f702b655f8439a069e3bcd7d1216ee267ceada Mon Sep 17 00:00:00 2001 From: Hynek Kydlicek Date: Mon, 12 May 2025 14:34:55 +0000 Subject: [PATCH 13/13] add new tasks --- src/lighteval/config/lighteval_config.py | 4 +- src/lighteval/tasks/multilingual/adapters.py | 15 ++ src/lighteval/tasks/multilingual/tasks.py | 239 +++++++++++++++++++ 3 files changed, 255 insertions(+), 3 deletions(-) diff --git a/src/lighteval/config/lighteval_config.py b/src/lighteval/config/lighteval_config.py index 3fe6f0028..d76f3179c 100644 --- a/src/lighteval/config/lighteval_config.py +++ b/src/lighteval/config/lighteval_config.py @@ -38,7 +38,7 @@ @dataclass class GenerationArgs: - sampler: Optional[Union[str, "SamplerType"]] = None + sampler: Optional["SamplerType"] = None temperature: Optional[float] = None top_k: Optional[int] = None top_p: Optional[float] = None @@ -48,8 +48,6 @@ class GenerationArgs: use_cache: Optional[bool] = False def __post_init__(self): - if isinstance(self.sampler, str): - self.sampler = SamplerType[self.sampler.upper()] if self.seed is None: self.seed = DEFAULT_GENERATION_SEED diff --git a/src/lighteval/tasks/multilingual/adapters.py b/src/lighteval/tasks/multilingual/adapters.py index adce8c662..59b39fcd6 100644 --- a/src/lighteval/tasks/multilingual/adapters.py +++ b/src/lighteval/tasks/multilingual/adapters.py @@ -283,3 +283,18 @@ def get_mkqa_adapter(lang: Language, line: dict) -> QAInput | None: "question": line["queries"][lang_key], "choices": answers, } + + +def enem_adapter(lang: Language, line: dict) -> MCQInput | None: + if line["label"] 
== "Anulado": + return None + + question = line["question"] + for desc in line["description"]: + question = question.replace("[[placeholder]]", desc, 1) # Replace only first occurrence each time + + return { + "question": question, + "choices": line["alternatives"], + "gold_idx": LETTER_INDICES.index(line["label"]), + } diff --git a/src/lighteval/tasks/multilingual/tasks.py b/src/lighteval/tasks/multilingual/tasks.py index cf5525b8a..8b185d49a 100644 --- a/src/lighteval/tasks/multilingual/tasks.py +++ b/src/lighteval/tasks/multilingual/tasks.py @@ -60,6 +60,8 @@ from lighteval.tasks.templates.utils.translation_literals import TRANSLATION_LITERALS from lighteval.utils.language import Language, iso_639_3_ind_to_iso_639_3_macro +from lighteval.tasks.multilingual.adapters import enem_adapter + TASKS_TABLE = [] # ------------------------------- NLI Tasks ------------------------------- # @@ -123,6 +125,7 @@ for formulation in [MCFFormulation(), CFFormulation(), HybridFormulation()] ] + # Improvement on XNLI with better translation, from our experience models tend to # perform better on XNLI2.0 than XNLI # https://arxiv.org/abs/2301.06527 @@ -857,6 +860,66 @@ ] ] +# GermanQuAD: High-quality German QA dataset with 13,722 questions +# https://arxiv.org/abs/2104.12741 +germanquad_tasks = [ + LightevalTaskConfig( + name=f"germanquad_{Language.GERMAN.value}", + prompt_function=get_qa_prompt_function( + Language.GERMAN, + lambda line: { + "question": line["question"], + "context": line["context"], + "choices": [ans for ans in line["answers"]["text"] if len(ans) > 0], + }, + ), + suite=("lighteval",), + hf_repo="deepset/germanquad", + hf_subset="plain_text", + trust_dataset=True, + hf_revision="fff05ceaf2ffbe5b65c7e0c57e678f7b7e1a0581", + hf_filter=lambda line: any(len(ans) > 0 for ans in line["answers"]["text"]), + evaluation_splits=("test",), + few_shots_split="train", + generation_size=400, + stop_sequence=("\n",), + metric=( + multilingual_quasi_exact_match_metric(Language.GERMAN, "prefix"), + multilingual_quasi_f1_score_metric(Language.GERMAN), + ), + ) +] + + +# SQuAD-it: Italian translation of the SQuAD dataset +# https://github.com/crux82/squad-it +squad_it_tasks = [ + LightevalTaskConfig( + name=f"squad_{Language.ITALIAN.value}", + prompt_function=get_qa_prompt_function( + Language.ITALIAN, + lambda line: { + "question": line["question"], + "context": line["context"], + "choices": [ans for ans in line["answers"]["text"] if len(ans) > 0], + }, + ), + suite=("lighteval",), + hf_repo="crux82/squad_it", + hf_subset="default", + hf_filter=lambda line: any(len(ans) > 0 for ans in line["answers"]["text"]), + evaluation_splits=("test",), + few_shots_split="train", + generation_size=400, + stop_sequence=("\n",), + metric=( + multilingual_quasi_exact_match_metric(Language.ITALIAN, "prefix"), + multilingual_quasi_f1_score_metric(Language.ITALIAN), + ), + ) +] + + # ThaiQA: A question answering dataset for the Thai language. 
thaiqa_tasks = [ LightevalTaskConfig( @@ -910,6 +973,67 @@ ) ] +# FaQuAD: A Portuguese Reading Comprehension Dataset +# https://arxiv.org/abs/2007.15671 +faquad_tasks = [ + LightevalTaskConfig( + name=f"faquad_{Language.PORTUGUESE.value}", + prompt_function=get_qa_prompt_function( + Language.PORTUGUESE, + lambda line: { + "question": line["question"], + "context": line["context"], + "choices": [ans for ans in line["answers"]["text"] if len(ans) > 0], + }, + ), + suite=("lighteval",), + hf_repo="eraldoluis/faquad", + hf_subset="plain_text", + trust_dataset=True, + hf_revision="205ba826a2282a4a5aa9bd3651e55ee4f2da1546", + hf_filter=lambda line: any(len(ans) > 0 for ans in line["answers"]["text"]), + evaluation_splits=("validation",), + few_shots_split="train", + metric=( + multilingual_quasi_exact_match_metric(Language.PORTUGUESE, "prefix"), + multilingual_quasi_f1_score_metric(Language.PORTUGUESE), + ), + generation_size=400, + stop_sequence=("\n",), + ) +] + + +# SQuAD-es: Spanish translation of the Stanford Question Answering Dataset +# https://huggingface.co/datasets/ccasimiro/squad_es +squad_es_tasks = [ + LightevalTaskConfig( + name=f"squad_{Language.SPANISH.value}", + prompt_function=get_qa_prompt_function( + Language.SPANISH, + lambda line: { + "question": line["question"], + "context": line["context"], + "choices": [ans for ans in line["answers"]["text"] if len(ans) > 0], + }, + ), + suite=("lighteval",), + hf_repo="ccasimiro/squad_es", + hf_subset="v2.0.0", + hf_filter=lambda line: any(len(ans) > 0 for ans in line["answers"]["text"]), + evaluation_splits=("validation",), + few_shots_split="train", + metric=( + multilingual_quasi_exact_match_metric(Language.SPANISH, "prefix"), + multilingual_quasi_f1_score_metric(Language.SPANISH), + ), + generation_size=400, + stop_sequence=("\n",), + ) +] + + + # ARCD: Arabic Reading Comprehension Dataset. # https://arxiv.org/pdf/1906.05394 arcd_tasks = [ @@ -1464,6 +1588,10 @@ *race_ar_task, *belebele_tasks, *c3_tasks, + *squad_it_tasks, + *squad_es_tasks, + *faquad_tasks, + *germanquad_tasks, ] ) @@ -2994,6 +3122,41 @@ ] ] +# Spanish version of OpenBookQA from BSC Language Technology group +# Dataset: https://huggingface.co/datasets/BSC-LT/openbookqa-es +openbook_es_tasks = [ + LightevalTaskConfig( + name=f"openbookqa_{Language.SPANISH.value}_{formulation.name.lower()}", + prompt_function=get_mcq_prompt_function( + Language.SPANISH, + lambda line: { + "question": line["question_stem"], + "choices": line["choices"]["text"], + "gold_idx": LETTER_INDICES.index(line["answerKey"]), + }, + formulation=formulation, + ), + suite=["lighteval"], + hf_repo="BSC-LT/openbookqa-es", + hf_subset="default", + evaluation_splits=("test",), + few_shots_split="validation", + metric=get_metrics_for_formulation( + formulation, + [ + loglikelihood_acc_metric(normalization=LogProbTokenNorm()), + loglikelihood_acc_metric(normalization=LogProbCharNorm()), + ], + ), + ) + for formulation in [ + MCFFormulation(), + CFFormulation(), + HybridFormulation(), + ] +] + + # The Russian version is part of the MERA (Multilingual Enhanced Russian NLP Architectures) project. 
# Paper: https://arxiv.org/abs/2401.04531 openbook_rus_tasks = [ @@ -3032,6 +3195,7 @@ [ *openbook_rus_tasks, *openbook_ara_tasks, + *openbook_es_tasks, ] ) @@ -3376,6 +3540,79 @@ ] +# OAB Exams: A collection of questions from the Brazilian Bar Association exam +# The exam is required for anyone who wants to practice law in Brazil +# Dataset: https://huggingface.co/datasets/eduagarcia/oab_exams +oab_exams_tasks = [ + LightevalTaskConfig( + name=f"oab_exams_{Language.PORTUGUESE.value}_{formulation.name.lower()}", + prompt_function=get_mcq_prompt_function( + Language.PORTUGUESE, + lambda line: { + "question": line["question"], + "choices": line["choices"]["text"], + "gold_idx": LETTER_INDICES.index(line["answerKey"]), + }, + formulation=formulation, + ), + suite=("lighteval",), + hf_repo="eduagarcia/oab_exams", + hf_subset="default", + evaluation_splits=("train",), + hf_avail_splits=["train"], + metric=get_metrics_for_formulation( + formulation, + [ + loglikelihood_acc_metric(normalization=LogProbTokenNorm()), + loglikelihood_acc_metric(normalization=LogProbCharNorm()), + ], + ), + ) + for formulation in [ + MCFFormulation(), + CFFormulation(), + HybridFormulation(), + ] +] + +# ENEM (Exame Nacional do Ensino Médio) is a standardized Brazilian national secondary +# education examination. The exam is used both as a university admission test and as a +# high school evaluation test. +# Dataset: https://huggingface.co/datasets/maritaca-ai/enem +enem_tasks = [ + LightevalTaskConfig( + name=f"enem_{Language.PORTUGUESE.value}_{formulation.name.lower()}:{year}", + prompt_function=get_mcq_prompt_function( + Language.PORTUGUESE, + partial( + enem_adapter, + Language.PORTUGUESE, + ), + formulation=formulation, + ), + suite=("lighteval",), + hf_repo="maritaca-ai/enem", + hf_subset=year, + evaluation_splits=("train",), + hf_avail_splits=["train"], + metric=get_metrics_for_formulation( + formulation, + [ + loglikelihood_acc_metric(normalization=LogProbTokenNorm()), + loglikelihood_acc_metric(normalization=LogProbCharNorm()), + ], + ), + ) + for year in ["2022", "2023", "2024"] + for formulation in [ + MCFFormulation(), + CFFormulation(), + HybridFormulation(), + ] +] + + + # WorldTree is a dataset for multi-hop inference in science question answering. # It provides explanations for elementary science questions by combining facts from a semi-structured knowledge base. # This Russian version is part of the MERA (Multilingual Evaluation of Reasoning Abilities) benchmark. @@ -3417,6 +3654,8 @@ *agieval_tasks_zh, *worldtree_rus_tasks, *ceval_tasks, + *oab_exams_tasks, + *enem_tasks, ] )
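For reference, the pad-aware position ids introduced in the _loglikelihood_tokens change of PATCH 11/13 (fill the tensor with -1, then write 0..length-1 for the real tokens of each sequence) can be reproduced in isolation. The sketch below is a simplified, CPU-only illustration of that pattern; the token ids and lengths are invented for the example and are not taken from any model or dataset.

import torch

# Illustrative sketch of the position_ids construction used for right-padded
# batches: padding positions keep -1, real tokens get sequential positions.
input_ids = torch.tensor([
    [11, 12, 13, 0, 0],    # length 3, right-padded with two pad tokens
    [21, 22, 23, 24, 25],  # length 5, no padding
])
input_lengths = [3, 5]

position_ids = torch.full(input_ids.shape, fill_value=-1, dtype=torch.long)
for i, length in enumerate(input_lengths):
    position_ids[i, :length] = torch.arange(length, dtype=torch.long)

print(position_ids)
# tensor([[ 0,  1,  2, -1, -1],
#         [ 0,  1,  2,  3,  4]])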
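Similarly, the placeholder substitution performed by the enem_adapter added in PATCH 13/13 can be checked with a standalone snippet. The example line below is invented (real rows come from the maritaca-ai/enem dataset), and LETTER_INDICES is redefined locally so the snippet runs without importing lighteval.

# Illustrative sketch of enem_adapter's placeholder filling and gold-index mapping.
LETTER_INDICES = ["A", "B", "C", "D", "E"]

line = {
    "question": "Considere o texto [[placeholder]] e a imagem [[placeholder]].",
    "description": ["<descricao do texto>", "<descricao da imagem>"],
    "alternatives": ["opcao A", "opcao B", "opcao C", "opcao D", "opcao E"],
    "label": "C",
}

question = line["question"]
for desc in line["description"]:
    # Replace only the first remaining occurrence each time, as in the adapter
    question = question.replace("[[placeholder]]", desc, 1)

mcq_input = {
    "question": question,
    "choices": line["alternatives"],
    "gold_idx": LETTER_INDICES.index(line["label"]),  # "C" -> 2
}
print(mcq_input)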