Commit 0771dce

[MAINT] Drop 3.6 python support (#258)
* Drop 3.6 python
* torch tensor fix
* Increase samples
* fix flake
1 parent eda89f4 commit 0771dce

File tree

9 files changed (+49, -79 lines)


.github/workflows/pytest.yml

Lines changed: 2 additions & 2 deletions
@@ -8,7 +8,7 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        python-version: [3.6, 3.7, 3.8]
+        python-version: [3.7, 3.8]
         include:
           - python-version: 3.8
             code-cov: true
@@ -52,4 +52,4 @@ jobs:
       uses: codecov/codecov-action@v1
       with:
         fail_ci_if_error: true
-        verbose: true
+        verbose: true

autoPyTorch/pipeline/components/preprocessing/image_preprocessing/normalise/ImageNormalizer.py

Lines changed: 4 additions & 4 deletions
@@ -2,7 +2,7 @@
 
 import numpy as np
 
-import torch.tensor
+import torch
 
 from autoPyTorch.pipeline.components.preprocessing.image_preprocessing.normalise.base_normalizer import BaseNormalizer
 
@@ -30,16 +30,16 @@ def fit(self, X: Dict[str, Any], y: Optional[Any] = None) -> "ImageNormalizer":
         self.std = X['dataset_properties']['std']
         return self
 
-    def __call__(self, X: Union[np.ndarray, torch.tensor]) -> Union[np.ndarray, torch.tensor]:
+    def __call__(self, X: Union[np.ndarray, torch.Tensor]) -> Union[np.ndarray, torch.Tensor]:
         """
         Makes the autoPyTorchPreprocessingComponent Callable. Calling the component
         calls the transform function of the underlying early_preprocessor and
         returns the transformed array.
         Args:
-            X (Union[np.ndarray, torch.tensor]): input data tensor
+            X (Union[np.ndarray, torch.Tensor]): input data tensor
 
         Returns:
-            Union[np.ndarray, torch.tensor]: Transformed data tensor
+            Union[np.ndarray, torch.Tensor]: Transformed data tensor
         """
         X = (X - self.mean) / self.std
         return X
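Note on the fix above: `torch.Tensor` is the tensor class and is what belongs inside a `typing.Union` annotation, whereas `torch.tensor` is a factory function; the old `import torch.tensor` also pulled in an internal module that newer PyTorch releases deprecate. A minimal standalone sketch of the corrected pattern (an illustration, not the project's actual class):

from typing import Union

import numpy as np
import torch  # the top-level import is all that is needed to reference torch.Tensor


def normalise(X: Union[np.ndarray, torch.Tensor],
              mean: float, std: float) -> Union[np.ndarray, torch.Tensor]:
    # Broadcasting makes the same expression work for ndarrays and Tensors alike.
    return (X - mean) / std


normalise(np.ones((2, 3)), mean=0.5, std=0.25)   # returns an np.ndarray
normalise(torch.ones(2, 3), mean=0.5, std=0.25)  # returns a torch.Tensor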

autoPyTorch/pipeline/components/preprocessing/image_preprocessing/normalise/NoNormalizer.py

Lines changed: 4 additions & 4 deletions
@@ -2,7 +2,7 @@
 
 import numpy as np
 
-import torch.tensor
+import torch
 
 from autoPyTorch.pipeline.components.preprocessing.image_preprocessing.normalise.base_normalizer import (
     BaseNormalizer
@@ -34,16 +34,16 @@ def transform(self, X: Dict[str, Any]) -> Dict[str, Any]:
         X.update({'normalise': self})
         return X
 
-    def __call__(self, X: Union[np.ndarray, torch.tensor]) -> Union[np.ndarray, torch.tensor]:
+    def __call__(self, X: Union[np.ndarray, torch.Tensor]) -> Union[np.ndarray, torch.Tensor]:
         """
         Makes the autoPyTorchPreprocessingComponent Callable. Calling the component
         calls the transform function of the underlying early_preprocessor and
         returns the transformed array.
         Args:
-            X (Union[np.ndarray, torch.tensor]): input data tensor
+            X (Union[np.ndarray, torch.Tensor]): input data tensor
 
         Returns:
-            Union[np.ndarray, torch.tensor]: Transformed data tensor
+            Union[np.ndarray, torch.Tensor]: Transformed data tensor
         """
         return X

setup.py

Lines changed: 10 additions & 2 deletions
@@ -1,4 +1,10 @@
 import setuptools
+import sys
+if sys.version_info < (3, 7):
+    raise ValueError(
+        'Unsupported Python version %d.%d.%d found. Auto-PyTorch requires Python '
+        '3.7 or higher.' % (sys.version_info.major, sys.version_info.minor, sys.version_info.micro)
+    )
 
 with open("README.md", "r") as f:
     long_description = f.read()
@@ -27,10 +33,12 @@
         "Topic :: Utilities",
         "Topic :: Scientific/Engineering",
         "Topic :: Scientific/Engineering :: Artificial Intelligence",
-        "Programming Language :: Python :: 3",
+        'Programming Language :: Python :: 3.7',
+        'Programming Language :: Python :: 3.8',
+        'Programming Language :: Python :: 3.9',
         "License :: OSI Approved :: BSD License",
     ],
-    python_requires='>=3',
+    python_requires='>=3.7',
     platforms=['Linux'],
     install_requires=requirements,
     include_package_data=True,
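The version gate is now enforced in two places: the `sys.version_info` check aborts `setup.py` with an explicit error when it runs under an old interpreter, while `python_requires='>=3.7'` records the constraint in the package metadata so pip refuses to install the distribution on Python 3.6 without executing the check at all. A rough sketch of the same runtime guard in isolation (`MIN_PYTHON` is a name introduced here for illustration):

import sys

MIN_PYTHON = (3, 7)  # assumed floor, matching the commit
if sys.version_info < MIN_PYTHON:
    # Fail fast with a readable message instead of a later, more obscure error.
    raise ValueError(
        'Unsupported Python version %d.%d.%d found. This package requires '
        'Python %d.%d or higher.' % (sys.version_info[:3] + MIN_PYTHON)
    )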

test/conftest.py

Lines changed: 2 additions & 2 deletions
@@ -26,7 +26,7 @@
 from autoPyTorch.utils.pipeline import get_dataset_requirements
 
 
-N_SAMPLES = 200
+N_SAMPLES = 300
 
 
 @pytest.fixture(scope="session")
@@ -222,7 +222,7 @@ def get_tabular_data(task):
         validator = TabularInputValidator(is_classification=True).fit(X.copy(), y.copy())
 
     elif task == "regression_numerical_only":
-        X, y = make_regression(n_samples=N_SAMPLES,
+        X, y = make_regression(n_samples=3 * N_SAMPLES,
                                n_features=4,
                                n_informative=3,
                                n_targets=1,
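For context, `N_SAMPLES` is the dataset size shared by the test fixtures; raising it to 300 (and tripling it for the numerical-only regression task) matches the commit's "Increase samples" note, presumably so the generated toy datasets have enough rows for the train/validation splits used in the tests. A hedged sketch of what the regression branch now generates (parameters copied from the diff; the seed here is arbitrary and not taken from the fixture):

from sklearn.datasets import make_regression

N_SAMPLES = 300

# 3 * N_SAMPLES = 900 rows, 4 features (3 informative), a single regression target.
X, y = make_regression(n_samples=3 * N_SAMPLES,
                       n_features=4,
                       n_informative=3,
                       n_targets=1,
                       random_state=0)  # arbitrary seed for this sketch
print(X.shape, y.shape)  # (900, 4) (900,)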

test/test_api/test_api.py

Lines changed: 21 additions & 55 deletions
@@ -2,7 +2,6 @@
 import os
 import pathlib
 import pickle
-import sys
 import unittest
 from test.test_api.utils import dummy_do_dummy_prediction, dummy_eval_function, dummy_traditional_classification
 
@@ -63,17 +62,11 @@ def test_tabular_classification(openml_id, resampling_strategy, backend, resampl
     X_train, X_test, y_train, y_test = sklearn.model_selection.train_test_split(
         X, y, random_state=42)
 
-    include = None
-    # for python less than 3.7, learned entity embedding
-    # is not able to be stored on disk (only on CI)
-    if sys.version_info < (3, 7):
-        include = {'network_embedding': ['NoEmbedding']}
     # Search for a good configuration
     estimator = TabularClassificationTask(
         backend=backend,
         resampling_strategy=resampling_strategy,
         resampling_strategy_args=resampling_strategy_args,
-        include_components=include,
         seed=42,
     )
 
@@ -210,18 +203,14 @@ def test_tabular_classification(openml_id, resampling_strategy, backend, resampl
     assert 'train_loss' in incumbent_results
 
     # Check that we can pickle
-    # Test pickle
-    # This can happen on python greater than 3.6
-    # as older python do not control the state of the logger
-    if sys.version_info >= (3, 7):
-        dump_file = os.path.join(estimator._backend.temporary_directory, 'dump.pkl')
+    dump_file = os.path.join(estimator._backend.temporary_directory, 'dump.pkl')
 
-        with open(dump_file, 'wb') as f:
-            pickle.dump(estimator, f)
+    with open(dump_file, 'wb') as f:
+        pickle.dump(estimator, f)
 
-        with open(dump_file, 'rb') as f:
-            restored_estimator = pickle.load(f)
-        restored_estimator.predict(X_test)
+    with open(dump_file, 'rb') as f:
+        restored_estimator = pickle.load(f)
+    restored_estimator.predict(X_test)
 
     # Test refit on dummy data
     estimator.refit(dataset=backend.load_datamanager())
@@ -264,17 +253,11 @@ def test_tabular_regression(openml_name, resampling_strategy, backend, resamplin
     X_train, X_test, y_train, y_test = sklearn.model_selection.train_test_split(
         X, y, random_state=1)
 
-    include = None
-    # for python less than 3.7, learned entity embedding
-    # is not able to be stored on disk (only on CI)
-    if sys.version_info < (3, 7):
-        include = {'network_embedding': ['NoEmbedding']}
     # Search for a good configuration
     estimator = TabularRegressionTask(
         backend=backend,
         resampling_strategy=resampling_strategy,
         resampling_strategy_args=resampling_strategy_args,
-        include_components=include,
         seed=42,
     )
 
@@ -403,30 +386,26 @@ def test_tabular_regression(openml_name, resampling_strategy, backend, resamplin
     assert 'train_loss' in incumbent_results, estimator.run_history.data
 
     # Check that we can pickle
-    # Test pickle
-    # This can happen on python greater than 3.6
-    # as older python do not control the state of the logger
-    if sys.version_info >= (3, 7):
-        dump_file = os.path.join(estimator._backend.temporary_directory, 'dump.pkl')
+    dump_file = os.path.join(estimator._backend.temporary_directory, 'dump.pkl')
 
-        with open(dump_file, 'wb') as f:
-            pickle.dump(estimator, f)
+    with open(dump_file, 'wb') as f:
+        pickle.dump(estimator, f)
 
-        with open(dump_file, 'rb') as f:
-            restored_estimator = pickle.load(f)
-        restored_estimator.predict(X_test)
+    with open(dump_file, 'rb') as f:
+        restored_estimator = pickle.load(f)
+    restored_estimator.predict(X_test)
 
-        # Test refit on dummy data
-        estimator.refit(dataset=backend.load_datamanager())
+    # Test refit on dummy data
+    estimator.refit(dataset=backend.load_datamanager())
 
-        # Make sure that a configuration space is stored in the estimator
-        assert isinstance(estimator.get_search_space(), CS.ConfigurationSpace)
+    # Make sure that a configuration space is stored in the estimator
+    assert isinstance(estimator.get_search_space(), CS.ConfigurationSpace)
 
-        representation = estimator.show_models()
-        assert isinstance(representation, str)
-        assert 'Weight' in representation
-        assert 'Preprocessing' in representation
-        assert 'Estimator' in representation
+    representation = estimator.show_models()
+    assert isinstance(representation, str)
+    assert 'Weight' in representation
+    assert 'Preprocessing' in representation
+    assert 'Estimator' in representation
 
 
 @pytest.mark.parametrize('openml_id', (
@@ -536,16 +515,10 @@ def test_portfolio_selection(openml_id, backend, n_samples):
     X_train, X_test, y_train, y_test = sklearn.model_selection.train_test_split(
         X, y, random_state=1)
 
-    include = None
-    # for python less than 3.7, learned entity embedding
-    # is not able to be stored on disk (only on CI)
-    if sys.version_info < (3, 7):
-        include = {'network_embedding': ['NoEmbedding']}
     # Search for a good configuration
     estimator = TabularClassificationTask(
         backend=backend,
         resampling_strategy=HoldoutValTypes.holdout_validation,
-        include_components=include
     )
 
     with unittest.mock.patch.object(estimator, '_do_dummy_prediction', new=dummy_do_dummy_prediction):
@@ -584,16 +557,9 @@ def test_portfolio_selection_failure(openml_id, backend, n_samples):
     X_train, X_test, y_train, y_test = sklearn.model_selection.train_test_split(
         X, y, random_state=1)
 
-    include = None
-    # for python less than 3.7, learned entity embedding
-    # is not able to be stored on disk (only on CI)
-    if sys.version_info < (3, 7):
-        include = {'network_embedding': ['NoEmbedding']}
-    # Search for a good configuration
     estimator = TabularClassificationTask(
         backend=backend,
         resampling_strategy=HoldoutValTypes.holdout_validation,
-        include_components=include
     )
     with pytest.raises(FileNotFoundError, match=r"The path: .+? provided for 'portfolio_selection' "
                                                 r"for the file containing the portfolio configurations "

test/test_ensemble/test_ensemble.py

Lines changed: 0 additions & 2 deletions
@@ -690,8 +690,6 @@ def test_ensemble_builder_process_realrun(dask_client, ensemble_backend):
 
 @flaky(max_runs=3)
 @unittest.mock.patch('autoPyTorch.ensemble.ensemble_builder.EnsembleBuilder.fit_ensemble')
-@pytest.mark.skipif(sys.version_info >= (3, 7),
-                    reason="Causes out-of-memory Errors in CI")
 def test_ensemble_builder_nbest_remembered(fit_ensemble, ensemble_backend, dask_client):
     """
     Makes sure ensemble builder returns the size of the ensemble that pynisher allowed

test/test_evaluation/test_evaluation.py

Lines changed: 0 additions & 1 deletion
@@ -356,7 +356,6 @@ def test_exception_in_target_function(self, eval_holdout_mock):
         self.assertIn('traceback', info[1].additional_info)
         self.assertNotIn('exitcode', info[1].additional_info)
 
-    @unittest.skipIf(sys.version_info < (3, 7), reason="requires python3.7 or higher")
     def test_silent_exception_in_target_function(self):
         config = unittest.mock.Mock(spec=int)
         config.config_id = 198

test/test_pipeline/components/setup/test_setup_traditional_models.py

Lines changed: 6 additions & 7 deletions
@@ -126,12 +126,11 @@ def test_model_fit_predict_score(traditional_learner, fit_dictionary_tabular):
                              fit_dictionary_tabular['y_train'][fit_dictionary_tabular['val_indices']])
     assert np.allclose(score, model.fit_output['val_score'], atol=1e-6)
 
-    if sys.version_info >= (3, 7):
-        dump_file = os.path.join(fit_dictionary_tabular['backend'].temporary_directory, 'dump.pkl')
+    dump_file = os.path.join(fit_dictionary_tabular['backend'].temporary_directory, 'dump.pkl')
 
-        with open(dump_file, 'wb') as f:
-            pickle.dump(model, f)
+    with open(dump_file, 'wb') as f:
+        pickle.dump(model, f)
 
-        with open(dump_file, 'rb') as f:
-            restored_estimator = pickle.load(f)
-            restored_estimator.predict(fit_dictionary_tabular['X_train'])
+    with open(dump_file, 'rb') as f:
+        restored_estimator = pickle.load(f)
+        restored_estimator.predict(fit_dictionary_tabular['X_train'])

0 commit comments