@@ -4345,7 +4345,7 @@ def eval(
43454345 feval : callable, list of callable, or None, optional (default=None)
43464346 Customized evaluation function.
43474347 Each evaluation function should accept two parameters: preds, eval_data,
4348- and return (eval_name, eval_result, is_higher_better ) or list of such tuples.
4348+ and return (metric_name, metric_value, maximize) or list of such tuples.
43494349
43504350 preds : numpy 1-D array or numpy 2-D array (for multi-class task)
43514351 The predicted values.
@@ -4354,17 +4354,17 @@ def eval(
43544354 e.g. they are raw margin instead of probability of positive class for binary task in this case.
43554355 eval_data : Dataset
43564356 A ``Dataset`` to evaluate.
4357- eval_name : str
4358- The name of evaluation function (without whitespace ).
4359- eval_result : float
4360- The eval result .
4361- is_higher_better : bool
4362- Is eval result higher better, e.g. AUC is ``is_higher_better`` .
4357+ metric_name : str
4358+ Unique identifier for the metric; should not contain whitespace (e.g. "custom_adjusted_mse").
4359+ metric_value : float
4360+ Value of the evaluation metric.
4361+ maximize : bool
4362+ Are higher values better? e.g. ``True`` for AUC and ``False`` for binary error.
43634363
43644364 Returns
43654365 -------
43664366 result : list
4367- List with (dataset_name, eval_name, eval_result, is_higher_better ) tuples.
4367+ List with (dataset_name, metric_name, metric_value, maximize) tuples.
43684368 """
43694369 if not isinstance (data , Dataset ):
43704370 raise TypeError ("Can only eval for Dataset instance" )
@@ -4394,7 +4394,7 @@ def eval_train(
43944394 feval : callable, list of callable, or None, optional (default=None)
43954395 Customized evaluation function.
43964396 Each evaluation function should accept two parameters: preds, eval_data,
4397- and return (eval_name, eval_result, is_higher_better ) or list of such tuples.
4397+ and return (metric_name, metric_value, maximize) or list of such tuples.
43984398
43994399 preds : numpy 1-D array or numpy 2-D array (for multi-class task)
44004400 The predicted values.
@@ -4403,17 +4403,17 @@ def eval_train(
44034403 e.g. they are raw margin instead of probability of positive class for binary task in this case.
44044404 eval_data : Dataset
44054405 The training dataset.
4406- eval_name : str
4407- The name of evaluation function (without whitespace ).
4408- eval_result : float
4409- The eval result .
4410- is_higher_better : bool
4411- Is eval result higher better, e.g. AUC is ``is_higher_better`` .
4406+ metric_name : str
4407+ Unique identifier for the metric; should not contain whitespace (e.g. "custom_adjusted_mse").
4408+ metric_value : float
4409+ Value of the evaluation metric.
4410+ maximize : bool
4411+ Are higher values better? e.g. ``True`` for AUC and ``False`` for binary error.
44124412
44134413 Returns
44144414 -------
44154415 result : list
4416- List with (train_dataset_name, eval_name, eval_result, is_higher_better ) tuples.
4416+ List with (train_dataset_name, metric_name, metric_value, maximize) tuples.
44174417 """
44184418 return self .__inner_eval (data_name = self ._train_data_name , data_idx = 0 , feval = feval )
44194419
@@ -4428,7 +4428,7 @@ def eval_valid(
44284428 feval : callable, list of callable, or None, optional (default=None)
44294429 Customized evaluation function.
44304430 Each evaluation function should accept two parameters: preds, eval_data,
4431- and return (eval_name, eval_result, is_higher_better ) or list of such tuples.
4431+ and return (metric_name, metric_value, maximize) or list of such tuples.
44324432
44334433 preds : numpy 1-D array or numpy 2-D array (for multi-class task)
44344434 The predicted values.
@@ -4437,17 +4437,17 @@ def eval_valid(
44374437 e.g. they are raw margin instead of probability of positive class for binary task in this case.
44384438 eval_data : Dataset
44394439 The validation dataset.
4440- eval_name : str
4441- The name of evaluation function (without whitespace ).
4442- eval_result : float
4443- The eval result .
4444- is_higher_better : bool
4445- Is eval result higher better, e.g. AUC is ``is_higher_better`` .
4440+ metric_name : str
4441+ Unique identifier for the metric; should not contain whitespace (e.g. "custom_adjusted_mse").
4442+ metric_value : float
4443+ Value of the evaluation metric.
4444+ maximize : bool
4445+ Are higher values better? e.g. ``True`` for AUC and ``False`` for binary error.
44464446
44474447 Returns
44484448 -------
44494449 result : list
4450- List with (validation_dataset_name, eval_name, eval_result, is_higher_better ) tuples.
4450+ List with (validation_dataset_name, metric_name, metric_value, maximize) tuples.
44514451 """
44524452 return [
44534453 item
@@ -5215,11 +5215,11 @@ def __inner_eval(
52155215 continue
52165216 feval_ret = eval_function (self .__inner_predict (data_idx = data_idx ), cur_data )
52175217 if isinstance (feval_ret , list ):
5218- for eval_name , val , is_higher_better in feval_ret :
5219- ret .append ((data_name , eval_name , val , is_higher_better ))
5218+ for metric_name , metric_value , maximize in feval_ret :
5219+ ret .append ((data_name , metric_name , metric_value , maximize ))
52205220 else :
5221- eval_name , val , is_higher_better = feval_ret
5222- ret .append ((data_name , eval_name , val , is_higher_better ))
5221+ metric_name , metric_value , maximize = feval_ret
5222+ ret .append ((data_name , metric_name , metric_value , maximize ))
52235223 return ret
52245224
52255225 def __inner_predict (self , * , data_idx : int ) -> np .ndarray :
0 commit comments