diff --git a/autoPyTorch/data/base_feature_validator.py b/autoPyTorch/data/base_feature_validator.py
index 6ef7cae6b..2c4ce4de9 100644
--- a/autoPyTorch/data/base_feature_validator.py
+++ b/autoPyTorch/data/base_feature_validator.py
@@ -5,25 +5,13 @@
 
 import pandas as pd
 
-import scipy.sparse
-
 from sklearn.base import BaseEstimator
 
+from autoPyTorch.utils.common import SparseMatrixType
 from autoPyTorch.utils.logging_ import PicklableClientLogger
 
 
-SUPPORTED_FEAT_TYPES = Union[
-    List,
-    pd.DataFrame,
-    np.ndarray,
-    scipy.sparse.bsr_matrix,
-    scipy.sparse.coo_matrix,
-    scipy.sparse.csc_matrix,
-    scipy.sparse.csr_matrix,
-    scipy.sparse.dia_matrix,
-    scipy.sparse.dok_matrix,
-    scipy.sparse.lil_matrix,
-]
+SupportedFeatTypes = Union[List, pd.DataFrame, np.ndarray, SparseMatrixType]
 
 
 class BaseFeatureValidator(BaseEstimator):
@@ -68,8 +56,8 @@ def __init__(
 
     def fit(
         self,
-        X_train: SUPPORTED_FEAT_TYPES,
-        X_test: Optional[SUPPORTED_FEAT_TYPES] = None,
+        X_train: SupportedFeatTypes,
+        X_test: Optional[SupportedFeatTypes] = None,
     ) -> BaseEstimator:
         """
         Validates and fit a categorical encoder (if needed) to the features.
@@ -77,10 +65,10 @@ def fit(
         CSR sparse data types are also supported
 
         Args:
-            X_train (SUPPORTED_FEAT_TYPES):
+            X_train (SupportedFeatTypes):
                 A set of features that are going to be validated (type and dimensionality
                 checks) and a encoder fitted in the case the data needs encoding
-            X_test (Optional[SUPPORTED_FEAT_TYPES]):
+            X_test (Optional[SupportedFeatTypes]):
                 A hold out set of data used for checking
         """
 
@@ -109,11 +97,11 @@ def fit(
 
     def _fit(
         self,
-        X: SUPPORTED_FEAT_TYPES,
+        X: SupportedFeatTypes,
     ) -> BaseEstimator:
         """
         Args:
-            X (SUPPORTED_FEAT_TYPES):
+            X (SupportedFeatTypes):
                 A set of features that are going to be validated (type and dimensionality
                 checks) and a encoder fitted in the case the data needs encoding
         Returns:
@@ -124,11 +112,11 @@ def _fit(
 
     def transform(
         self,
-        X: SUPPORTED_FEAT_TYPES,
+        X: SupportedFeatTypes,
     ) -> np.ndarray:
         """
         Args:
-            X_train (SUPPORTED_FEAT_TYPES):
+            X_train (SupportedFeatTypes):
                 A set of features, whose categorical features are going to be
                 transformed
 
diff --git a/autoPyTorch/data/base_target_validator.py b/autoPyTorch/data/base_target_validator.py
index 393f3d85b..ddbe384cb 100644
--- a/autoPyTorch/data/base_target_validator.py
+++ b/autoPyTorch/data/base_target_validator.py
@@ -5,26 +5,13 @@
 
 import pandas as pd
 
-import scipy.sparse
-
 from sklearn.base import BaseEstimator
 
+from autoPyTorch.utils.common import SparseMatrixType
 from autoPyTorch.utils.logging_ import PicklableClientLogger
 
 
-SUPPORTED_TARGET_TYPES = Union[
-    List,
-    pd.Series,
-    pd.DataFrame,
-    np.ndarray,
-    scipy.sparse.bsr_matrix,
-    scipy.sparse.coo_matrix,
-    scipy.sparse.csc_matrix,
-    scipy.sparse.csr_matrix,
-    scipy.sparse.dia_matrix,
-    scipy.sparse.dok_matrix,
-    scipy.sparse.lil_matrix,
-]
+SupportedTargetTypes = Union[List, pd.Series, pd.DataFrame, np.ndarray, SparseMatrixType]
 
 
 class BaseTargetValidator(BaseEstimator):
@@ -69,17 +56,17 @@ def __init__(self,
 
     def fit(
         self,
-        y_train: SUPPORTED_TARGET_TYPES,
-        y_test: Optional[SUPPORTED_TARGET_TYPES] = None,
+        y_train: SupportedTargetTypes,
+        y_test: Optional[SupportedTargetTypes] = None,
     ) -> BaseEstimator:
         """
         Validates and fit a categorical encoder (if needed) to the targets
         The supported data types are List, numpy arrays and pandas DataFrames.
 
         Args:
-            y_train (SUPPORTED_TARGET_TYPES)
+            y_train (SupportedTargetTypes)
                 A set of targets set aside for training
-            y_test (Union[SUPPORTED_TARGET_TYPES])
+            y_test (Union[SupportedTargetTypes])
                 A hold out set of data used of the targets. It is also
                 used to fit the categories of the encoder.
         """
@@ -128,26 +115,26 @@ def fit(
 
     def _fit(
         self,
-        y_train: SUPPORTED_TARGET_TYPES,
-        y_test: Optional[SUPPORTED_TARGET_TYPES] = None,
+        y_train: SupportedTargetTypes,
+        y_test: Optional[SupportedTargetTypes] = None,
     ) -> BaseEstimator:
         """
         Args:
-            y_train (SUPPORTED_TARGET_TYPES)
+            y_train (SupportedTargetTypes)
                 The labels of the current task. They are going to be encoded in
                 case of classification
-            y_test (Optional[SUPPORTED_TARGET_TYPES])
+            y_test (Optional[SupportedTargetTypes])
                 A holdout set of labels
         """
         raise NotImplementedError()
 
     def transform(
         self,
-        y: Union[SUPPORTED_TARGET_TYPES],
+        y: Union[SupportedTargetTypes],
     ) -> np.ndarray:
         """
         Args:
-            y (SUPPORTED_TARGET_TYPES)
+            y (SupportedTargetTypes)
                 A set of targets that are going to be encoded if the current
                 task is classification
         Returns:
@@ -158,7 +145,7 @@ def transform(
 
     def inverse_transform(
         self,
-        y: SUPPORTED_TARGET_TYPES,
+        y: SupportedTargetTypes,
     ) -> np.ndarray:
         """
         Revert any encoding transformation done on a target array
diff --git a/autoPyTorch/data/base_validator.py b/autoPyTorch/data/base_validator.py
index 13bb421c7..bebddff49 100644
--- a/autoPyTorch/data/base_validator.py
+++ b/autoPyTorch/data/base_validator.py
@@ -7,8 +7,8 @@
 from sklearn.base import BaseEstimator
 from sklearn.exceptions import NotFittedError
 
-from autoPyTorch.data.base_feature_validator import SUPPORTED_FEAT_TYPES
-from autoPyTorch.data.base_target_validator import SUPPORTED_TARGET_TYPES
+from autoPyTorch.data.base_feature_validator import SupportedFeatTypes
+from autoPyTorch.data.base_target_validator import SupportedTargetTypes
 
 
 class BaseInputValidator(BaseEstimator):
@@ -40,10 +40,10 @@ def __init__(
 
     def fit(
         self,
-        X_train: SUPPORTED_FEAT_TYPES,
-        y_train: SUPPORTED_TARGET_TYPES,
-        X_test: Optional[SUPPORTED_FEAT_TYPES] = None,
-        y_test: Optional[SUPPORTED_TARGET_TYPES] = None,
+        X_train: SupportedFeatTypes,
+        y_train: SupportedTargetTypes,
+        X_test: Optional[SupportedFeatTypes] = None,
+        y_test: Optional[SupportedTargetTypes] = None,
     ) -> BaseEstimator:
         """
         Validates and fit a categorical encoder (if needed) to the features, and
@@ -59,15 +59,15 @@ def fit(
         + If performing a classification task, the data is going to be encoded
 
         Args:
-            X_train (SUPPORTED_FEAT_TYPES):
+            X_train (SupportedFeatTypes):
                 A set of features that are going to be validated (type and dimensionality
                 checks). If this data contains categorical columns, an encoder is going
                 to be instantiated and trained with this data.
-            y_train (SUPPORTED_TARGET_TYPES):
+            y_train (SupportedTargetTypes):
                 A set of targets that are going to be encoded if the task is for classification
-            X_test (Optional[SUPPORTED_FEAT_TYPES]):
+            X_test (Optional[SupportedFeatTypes]):
                 A hold out set of features used for checking
-            y_test (SUPPORTED_TARGET_TYPES):
+            y_test (SupportedTargetTypes):
                 A hold out set of targets used for checking. Additionally, if the current task
                 is a classification task, this y_test categories are also going to be used to
                 fit a pre-processing encoding (to prevent errors on unseen classes).
@@ -96,16 +96,16 @@ def fit(
 
     def transform(
         self,
-        X: SUPPORTED_FEAT_TYPES,
-        y: Optional[SUPPORTED_TARGET_TYPES] = None,
+        X: SupportedFeatTypes,
+        y: Optional[SupportedTargetTypes] = None,
     ) -> Tuple[np.ndarray, Optional[np.ndarray]]:
         """
         Transform the given target or features to a numpy array
 
         Args:
-            X (SUPPORTED_FEAT_TYPES):
+            X (SupportedFeatTypes):
                 A set of features to transform
-            y (Optional[SUPPORTED_TARGET_TYPES]):
+            y (Optional[SupportedTargetTypes]):
                 A set of targets to transform
 
         Returns:
diff --git a/autoPyTorch/data/tabular_feature_validator.py b/autoPyTorch/data/tabular_feature_validator.py
index 27ed18cfc..4bab001c6 100644
--- a/autoPyTorch/data/tabular_feature_validator.py
+++ b/autoPyTorch/data/tabular_feature_validator.py
@@ -16,7 +16,7 @@
 from sklearn.impute import SimpleImputer
 from sklearn.pipeline import make_pipeline
 
-from autoPyTorch.data.base_feature_validator import BaseFeatureValidator, SUPPORTED_FEAT_TYPES
+from autoPyTorch.data.base_feature_validator import BaseFeatureValidator, SupportedFeatTypes
 
 
 def _create_column_transformer(
@@ -117,7 +117,7 @@ def _comparator(cmp1: str, cmp2: str) -> int:
 
     def _fit(
         self,
-        X: SUPPORTED_FEAT_TYPES,
+        X: SupportedFeatTypes,
     ) -> BaseEstimator:
         """
         In case input data is a pandas DataFrame, this utility encodes the user provided
@@ -125,7 +125,7 @@ def _fit(
         will be able to use
 
         Args:
-            X (SUPPORTED_FEAT_TYPES):
+            X (SupportedFeatTypes):
                 A set of features that are going to be validated (type and dimensionality
                 checks) and an encoder fitted in the case the data needs encoding
 
@@ -204,14 +204,14 @@ def _fit(
 
     def transform(
         self,
-        X: SUPPORTED_FEAT_TYPES,
+        X: SupportedFeatTypes,
     ) -> np.ndarray:
         """
         Validates and fit a categorical encoder (if needed) to the features.
         The supported data types are List, numpy arrays and pandas DataFrames.
 
         Args:
-            X_train (SUPPORTED_FEAT_TYPES):
+            X_train (SupportedFeatTypes):
                 A set of features, whose categorical features are going to be
                 transformed
 
@@ -276,13 +276,13 @@ def transform(
 
     def _check_data(
         self,
-        X: SUPPORTED_FEAT_TYPES,
+        X: SupportedFeatTypes,
     ) -> None:
         """
         Feature dimensionality and data type checks
 
         Args:
-            X (SUPPORTED_FEAT_TYPES):
+            X (SupportedFeatTypes):
                 A set of features that are going to be validated (type and dimensionality
                 checks) and an encoder fitted in the case the data needs encoding
         """
@@ -429,8 +429,8 @@ def _get_columns_to_encode(
 
     def list_to_dataframe(
         self,
-        X_train: SUPPORTED_FEAT_TYPES,
-        X_test: Optional[SUPPORTED_FEAT_TYPES] = None,
+        X_train: SupportedFeatTypes,
+        X_test: Optional[SupportedFeatTypes] = None,
     ) -> Tuple[pd.DataFrame, Optional[pd.DataFrame]]:
         """
         Converts a list to a pandas DataFrame. In this process, column types are inferred.
@@ -438,10 +438,10 @@ def list_to_dataframe(
         If test data is provided, we proactively match it to train data
 
         Args:
-            X_train (SUPPORTED_FEAT_TYPES):
+            X_train (SupportedFeatTypes):
                 A set of features that are going to be validated (type and dimensionality
                 checks) and a encoder fitted in the case the data needs encoding
-            X_test (Optional[SUPPORTED_FEAT_TYPES]):
+            X_test (Optional[SupportedFeatTypes]):
                 A hold out set of data used for checking
 
         Returns:
diff --git a/autoPyTorch/data/tabular_target_validator.py b/autoPyTorch/data/tabular_target_validator.py
index c37dc81c3..67b6001f8 100644
--- a/autoPyTorch/data/tabular_target_validator.py
+++ b/autoPyTorch/data/tabular_target_validator.py
@@ -13,14 +13,43 @@
 from sklearn.exceptions import NotFittedError
 from sklearn.utils.multiclass import type_of_target
 
-from autoPyTorch.data.base_target_validator import BaseTargetValidator, SUPPORTED_TARGET_TYPES
+from autoPyTorch.data.base_target_validator import BaseTargetValidator, SupportedTargetTypes
+from autoPyTorch.utils.common import SparseMatrixType
+
+
+ArrayType = Union[np.ndarray, SparseMatrixType]
+
+
+def _check_and_to_array(y: SupportedTargetTypes) -> ArrayType:
+    """ sklearn check array will make sure we have the correct numerical features for the array """
+    return sklearn.utils.check_array(y, force_all_finite=True, accept_sparse='csr', ensure_2d=False)
+
+
+def _modify_regression_target(y: ArrayType) -> ArrayType:
+    # Regression targets must have numbers after a decimal point.
+    # Ref: https://github.com/scikit-learn/scikit-learn/issues/8952
+    y_min = np.abs(y).min()
+    offset = max(y_min, 1e-13) * 1e-13  # Sufficiently small number
+    if y_min > 1e12:
+        raise ValueError(
+            "The minimum value for the target labels of regression tasks must be smaller than "
+            f"1e12 to avoid errors caused by an overflow, but got {y_min}"
+        )
+
+    # Since it is all integer, we can just add a random small number
+    if isinstance(y, np.ndarray):
+        y = y.astype(dtype=np.float64) + offset
+    else:
+        y.data = y.data.astype(dtype=np.float64) + offset
+
+    return y
 
 
 class TabularTargetValidator(BaseTargetValidator):
     def _fit(
         self,
-        y_train: SUPPORTED_TARGET_TYPES,
-        y_test: Optional[SUPPORTED_TARGET_TYPES] = None,
+        y_train: SupportedTargetTypes,
+        y_test: Optional[SupportedTargetTypes] = None,
     ) -> BaseEstimator:
         """
         If dealing with classification, this utility encodes the targets.
@@ -29,10 +58,10 @@ def _fit(
         errors
 
         Args:
-            y_train (SUPPORTED_TARGET_TYPES)
+            y_train (SupportedTargetTypes)
                 The labels of the current task. They are going to be encoded in
                 case of classification
-            y_test (Optional[SUPPORTED_TARGET_TYPES])
+            y_test (Optional[SupportedTargetTypes])
                 A holdout set of labels
         """
         if not self.is_classification or self.type_of_target == 'multilabel-indicator':
@@ -94,16 +123,31 @@ def _fit(
 
         return self
 
-    def transform(
-        self,
-        y: Union[SUPPORTED_TARGET_TYPES],
-    ) -> np.ndarray:
+    def _transform_by_encoder(self, y: SupportedTargetTypes) -> np.ndarray:
+        if self.encoder is None:
+            return _check_and_to_array(y)
+
+        # remove ravel warning from pandas Series
+        shape = np.shape(y)
+        if len(shape) > 1:
+            y = self.encoder.transform(y)
+        elif hasattr(y, 'iloc'):
+            # The Ordinal encoder expects a 2 dimensional input.
+            # The targets are 1 dimensional, so reshape to match the expected shape
+            y = cast(pd.DataFrame, y)
+            y = self.encoder.transform(y.to_numpy().reshape(-1, 1)).reshape(-1)
+        else:
+            y = self.encoder.transform(np.array(y).reshape(-1, 1)).reshape(-1)
+
+        return _check_and_to_array(y)
+
+    def transform(self, y: SupportedTargetTypes) -> np.ndarray:
         """
         Validates and fit a categorical encoder (if needed) to the features.
         The supported data types are List, numpy arrays and pandas DataFrames.
 
         Args:
-            y (SUPPORTED_TARGET_TYPES)
+            y (SupportedTargetTypes)
                 A set of targets that are going to be encoded if the current task
                 is classification
 
@@ -116,47 +160,23 @@ def transform(
 
         # Check the data here so we catch problems on new test data
         self._check_data(y)
+        y = self._transform_by_encoder(y)
 
-        if self.encoder is not None:
-            # remove ravel warning from pandas Series
-            shape = np.shape(y)
-            if len(shape) > 1:
-                y = self.encoder.transform(y)
-            else:
-                # The Ordinal encoder expects a 2 dimensional input.
-                # The targets are 1 dimensional, so reshape to match the expected shape
-                if hasattr(y, 'iloc'):
-                    y = cast(pd.DataFrame, y)
-                    y = self.encoder.transform(y.to_numpy().reshape(-1, 1)).reshape(-1)
-                else:
-                    y = self.encoder.transform(np.array(y).reshape(-1, 1)).reshape(-1)
-
-        # sklearn check array will make sure we have the
-        # correct numerical features for the array
-        # Also, a numpy array will be created
-        y = sklearn.utils.check_array(
-            y,
-            force_all_finite=True,
-            accept_sparse='csr',
-            ensure_2d=False,
-        )
-
-        # When translating a dataframe to numpy, make sure we
-        # honor the ravel requirement
+        # When translating a dataframe to numpy, make sure we honor the ravel requirement
         if y.ndim == 2 and y.shape[1] == 1:
             y = np.ravel(y)
 
+        if not self.is_classification and "continuous" not in type_of_target(y):
+            y = _modify_regression_target(y)
+
         return y
 
-    def inverse_transform(
-        self,
-        y: SUPPORTED_TARGET_TYPES,
-    ) -> np.ndarray:
+    def inverse_transform(self, y: SupportedTargetTypes) -> np.ndarray:
         """
         Revert any encoding transformation done on a target array
 
         Args:
-            y (Union[np.ndarray, pd.DataFrame, pd.Series]):
+            y (SupportedTargetTypes):
                 Target array to be transformed back to original form before encoding
         Returns:
             np.ndarray:
@@ -185,15 +205,12 @@ def inverse_transform(
             y = y.astype(self.dtype)
         return y
 
-    def _check_data(
-        self,
-        y: SUPPORTED_TARGET_TYPES,
-    ) -> None:
+    def _check_data(self, y: SupportedTargetTypes) -> None:
         """
         Perform dimensionality and data type checks on the targets
 
         Args:
-            y (Union[np.ndarray, pd.DataFrame, pd.Series]):
+            y (SupportedTargetTypes):
                 A set of features whose dimensionality and data type is
                 going to be checked
         """
diff --git a/autoPyTorch/datasets/base_dataset.py b/autoPyTorch/datasets/base_dataset.py
index 0f37e7938..be17b945d 100644
--- a/autoPyTorch/datasets/base_dataset.py
+++ b/autoPyTorch/datasets/base_dataset.py
@@ -49,6 +49,36 @@ def type_check(train_tensors: BaseDatasetInputType,
         check_valid_data(val_tensors[i])
 
 
+def _get_output_properties(train_tensors: BaseDatasetInputType) -> Tuple[int, str]:
+    """
+    Return the dimension of output given a target_labels and output_type.
+
+    Args:
+        train_tensors (BaseDatasetInputType):
+            Training data.
+
+    Returns:
+        output_dim (int):
+            The dimension of outputs.
+        output_type (str):
+            The output type according to sklearn specification.
+ """ + if isinstance(train_tensors, Dataset): + target_labels = np.array([sample[-1] for sample in train_tensors]) + else: + target_labels = np.array(train_tensors[1]) + + output_type: str = type_of_target(target_labels) + if STRING_TO_OUTPUT_TYPES[output_type] in CLASSIFICATION_OUTPUTS: + output_dim = len(np.unique(target_labels)) + elif target_labels.ndim > 1: + output_dim = target_labels.shape[-1] + else: + output_dim = 1 + + return output_dim, output_type + + class TransformSubset(Subset): """Wrapper of BaseDataset for splitted datasets @@ -132,15 +162,7 @@ def __init__( self.issparse: bool = issparse(self.train_tensors[0]) self.input_shape: Tuple[int] = self.train_tensors[0].shape[1:] if len(self.train_tensors) == 2 and self.train_tensors[1] is not None: - self.output_type: str = type_of_target(self.train_tensors[1]) - - if ( - self.output_type in STRING_TO_OUTPUT_TYPES - and STRING_TO_OUTPUT_TYPES[self.output_type] in CLASSIFICATION_OUTPUTS - ): - self.output_shape = len(np.unique(self.train_tensors[1])) - else: - self.output_shape = self.train_tensors[1].shape[-1] if self.train_tensors[1].ndim > 1 else 1 + self.output_shape, self.output_type = _get_output_properties(self.train_tensors) # TODO: Look for a criteria to define small enough to preprocess self.is_small_preprocess = True diff --git a/autoPyTorch/utils/common.py b/autoPyTorch/utils/common.py index 1488d5fcd..b0620a7db 100644 --- a/autoPyTorch/utils/common.py +++ b/autoPyTorch/utils/common.py @@ -20,6 +20,15 @@ from torch.utils.data.dataloader import default_collate HyperparameterValueType = Union[int, str, float] +SparseMatrixType = Union[ + scipy.sparse.bsr_matrix, + scipy.sparse.coo_matrix, + scipy.sparse.csc_matrix, + scipy.sparse.csr_matrix, + scipy.sparse.dia_matrix, + scipy.sparse.dok_matrix, + scipy.sparse.lil_matrix, +] class FitRequirement(NamedTuple): diff --git a/test/test_api/test_api.py b/test/test_api/test_api.py index e3603f668..4346ff2b6 100644 --- a/test/test_api/test_api.py +++ b/test/test_api/test_api.py @@ -904,3 +904,29 @@ def test_tabular_classification_test_evaluator(openml_id, backend, n_samples): assert 'opt_loss' in incumbent_results, "run history: {}, successful_num_run: {}".format(estimator.run_history.data, successful_num_run) assert 'train_loss' in incumbent_results + + +@pytest.mark.parametrize("ans,task_class", ( + ("continuous", TabularRegressionTask), + ("multiclass", TabularClassificationTask)) +) +def test_task_inference(ans, task_class, backend): + # Get the data and check that contents of data-manager make sense + X = np.random.random((6, 1)) + y = np.array([-10 ** 12, 0, 1, 2, 3, 4], dtype=np.int64) + 10 ** 12 + + estimator = task_class( + backend=backend, + resampling_strategy=HoldoutValTypes.holdout_validation, + resampling_strategy_args=None, + seed=42, + ) + dataset = estimator.get_dataset(X, y) + assert dataset.output_type == ans + + y += 10 ** 12 + 10 # Check if the function catches overflow possibilities + if ans == 'continuous': + with pytest.raises(ValueError): # ValueError due to `Too large value` + estimator.get_dataset(X, y) + else: + estimator.get_dataset(X, y) diff --git a/test/test_data/test_target_validator.py b/test/test_data/test_target_validator.py index aadc73416..8fd4527d9 100644 --- a/test/test_data/test_target_validator.py +++ b/test/test_data/test_target_validator.py @@ -150,17 +150,17 @@ def test_targetvalidator_supported_types_noclassification(input_data_targettest) assert validator.encoder is None if hasattr(input_data_targettest, "iloc"): - 
-        np.testing.assert_array_equal(
+        assert np.allclose(
             np.ravel(input_data_targettest.to_numpy()),
             np.ravel(transformed_y)
         )
     elif sparse.issparse(input_data_targettest):
-        np.testing.assert_array_equal(
+        assert np.allclose(
             np.ravel(input_data_targettest.todense()),
             np.ravel(transformed_y.todense())
         )
     else:
-        np.testing.assert_array_equal(
+        assert np.allclose(
             np.ravel(np.array(input_data_targettest)),
             np.ravel(transformed_y)
         )
diff --git a/test/test_datasets/test_base_dataset.py b/test/test_datasets/test_base_dataset.py
new file mode 100644
index 000000000..52b2fa9a5
--- /dev/null
+++ b/test/test_datasets/test_base_dataset.py
@@ -0,0 +1,19 @@
+import numpy as np
+
+import pytest
+
+from autoPyTorch.datasets.base_dataset import _get_output_properties
+
+
+@pytest.mark.parametrize(
+    "target_labels,dim,task_type", (
+        (np.arange(5), 5, "multiclass"),
+        (np.linspace(0, 1, 3), 1, "continuous"),
+        (np.linspace(0, 1, 3)[:, np.newaxis], 1, "continuous")
+    )
+)
+def test_get_output_properties(target_labels, dim, task_type):
+    train_tensors = np.array([np.empty_like(target_labels), target_labels])
+    output_dim, output_type = _get_output_properties(train_tensors)
+    assert output_dim == dim
+    assert output_type == task_type
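
Reviewer notes (outside the patch). The core of this change is `_modify_regression_target`: scikit-learn's `type_of_target` labels an all-integer target vector as `multiclass`, so an integer-valued regression target used to be mis-inferred as a classification task. Nudging the targets off the integer grid makes `type_of_target` report `continuous`. Below is a minimal, self-contained sketch of that idea for the dense-array case only (the patched helper also handles sparse targets via `y.data`); the function name is illustrative, not part of the autoPyTorch API:

```python
import numpy as np
from sklearn.utils.multiclass import type_of_target


def nudge_integer_targets(y: np.ndarray) -> np.ndarray:
    # Mirror of the patch's dense branch: add an offset small enough to
    # leave the regression values intact while breaking pure-integerness.
    y_min = np.abs(y).min()
    if y_min > 1e12:
        # Beyond this magnitude the offset itself could be absorbed or
        # meaningfully distort the targets, so the patch raises instead.
        raise ValueError(f"target magnitude too large: {y_min}")
    offset = max(y_min, 1e-13) * 1e-13  # sufficiently small number
    return y.astype(np.float64) + offset


y = np.array([0, 1, 2, 3, 4], dtype=np.int64)
print(type_of_target(y))                         # multiclass
print(type_of_target(nudge_integer_targets(y)))  # continuous
```

This is also why `test_task_inference` shifts `y` a second time by `10 ** 12 + 10`: once the smallest label exceeds `1e12`, the regression path is expected to raise the new `ValueError`, while the classification path is unaffected.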
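Similarly, `_get_output_properties` folds the output-shape/output-type inference out of `BaseDataset.__init__` into one testable helper. A rough standalone equivalent for plain `(X, y)` tuples is sketched below; it hard-codes the classification target types where the real helper consults autoPyTorch's `STRING_TO_OUTPUT_TYPES` and `CLASSIFICATION_OUTPUTS` tables, and it omits the `torch.utils.data.Dataset` branch:

```python
import numpy as np
from sklearn.utils.multiclass import type_of_target


def get_output_properties(train_tensors):
    # Infer (output_dim, output_type) from the target column of (X, y).
    target_labels = np.array(train_tensors[1])
    output_type = type_of_target(target_labels)
    if output_type in ("binary", "multiclass", "multilabel-indicator"):
        output_dim = len(np.unique(target_labels))  # number of classes
    elif target_labels.ndim > 1:
        output_dim = target_labels.shape[-1]        # multi-output regression
    else:
        output_dim = 1                              # scalar regression
    return output_dim, output_type


X = np.random.random((6, 2))
print(get_output_properties((X, np.arange(6))))            # (6, 'multiclass')
print(get_output_properties((X, np.linspace(0., 1., 6))))  # (1, 'continuous')
```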