
Commit a9dd328

Author: Joan Massich
Commit message: Apply nose2pytest
1 parent 1ea07b2

25 files changed, +216 -229 lines changed
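For context, nose2pytest is an automated rewriter: it replaces nose-style assertion helpers (assert_equal, assert_greater, assert_false, ...) with bare assert statements that pytest's assertion rewriting can introspect. A minimal sketch of the pattern applied throughout this diff, with hypothetical example values; the sklearn.utils.testing helpers shown existed at the time of this commit but have since been removed from scikit-learn:

    # Before: nose-style helper call, as in the removed lines below.
    from collections import Counter
    from sklearn.utils.testing import assert_equal  # deprecated nose-style helper

    counts = Counter(['a', 'a', 'b'])
    assert_equal(counts, {'a': 2, 'b': 1})

    # After: the equivalent plain assert emitted by nose2pytest; on failure,
    # pytest reports both operands of the comparison.
    assert counts == {'a': 2, 'b': 1}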

imblearn/combine/tests/test_smote_enn.py

+2 -2

@@ -6,8 +6,8 @@
 from __future__ import print_function
 
 import numpy as np
-from sklearn.utils.testing import (assert_allclose, assert_array_equal,
-                                   assert_raises_regex)
+from sklearn.utils.testing import assert_allclose, assert_array_equal
+from sklearn.utils.testing import assert_raises_regex
 
 from imblearn.combine import SMOTEENN
 from imblearn.under_sampling import EditedNearestNeighbours

imblearn/combine/tests/test_smote_tomek.py

+2 -2

@@ -6,8 +6,8 @@
 from __future__ import print_function
 
 import numpy as np
-from sklearn.utils.testing import (assert_allclose, assert_array_equal,
-                                   assert_raises_regex)
+from sklearn.utils.testing import assert_allclose, assert_array_equal
+from sklearn.utils.testing import assert_raises_regex
 
 from imblearn.combine import SMOTETomek
 from imblearn.over_sampling import SMOTE

imblearn/datasets/tests/test_imbalance.py

+6 -6

@@ -11,8 +11,8 @@
 import numpy as np
 
 from sklearn.datasets import load_iris
-from sklearn.utils.testing import (assert_equal, assert_raises_regex,
-                                   assert_warns_message)
+from sklearn.utils.testing import assert_raises_regex
+from sklearn.utils.testing import assert_warns_message
 
 from imblearn.datasets import make_imbalance
 
@@ -46,17 +46,17 @@ def test_make_imbalance_float():
     X_, y_ = assert_warns_message(DeprecationWarning,
                                   "'ratio' being a float is deprecated",
                                   make_imbalance, X, Y, ratio=0.5, min_c_=1)
-    assert_equal(Counter(y_), {0: 50, 1: 25, 2: 50})
+    assert Counter(y_) == {0: 50, 1: 25, 2: 50}
     # resample without using min_c_
     X_, y_ = make_imbalance(X_, y_, ratio=0.25, min_c_=None)
-    assert_equal(Counter(y_), {0: 50, 1: 12, 2: 50})
+    assert Counter(y_) == {0: 50, 1: 12, 2: 50}
 
 
 def test_make_imbalance_dict():
     ratio = {0: 10, 1: 20, 2: 30}
     X_, y_ = make_imbalance(X, Y, ratio=ratio)
-    assert_equal(Counter(y_), ratio)
+    assert Counter(y_) == ratio
 
     ratio = {0: 10, 1: 20}
     X_, y_ = make_imbalance(X, Y, ratio=ratio)
-    assert_equal(Counter(y_), {0: 10, 1: 20, 2: 50})
+    assert Counter(y_) == {0: 10, 1: 20, 2: 50}

imblearn/datasets/tests/test_zenodo.py

+10 -10

@@ -7,8 +7,8 @@
 # License: MIT
 
 from imblearn.datasets import fetch_datasets
-from sklearn.utils.testing import (assert_equal, assert_allclose,
-                                   assert_raises_regex, SkipTest)
+from sklearn.utils.testing import SkipTest, assert_allclose
+from sklearn.utils.testing import assert_raises_regex
 
 DATASET_SHAPE = {'ecoli': (336, 7),
                  'optical_digits': (5620, 64),
@@ -54,12 +54,12 @@ def test_fetch():
     for k in DATASET_SHAPE.keys():
 
         X1, X2 = datasets1[k].data, datasets2[k].data
-        assert_equal(DATASET_SHAPE[k], X1.shape)
-        assert_equal(X1.shape, X2.shape)
+        assert DATASET_SHAPE[k] == X1.shape
+        assert X1.shape == X2.shape
 
         y1, y2 = datasets1[k].target, datasets2[k].target
-        assert_equal((X1.shape[0],), y1.shape)
-        assert_equal((X1.shape[0],), y2.shape)
+        assert (X1.shape[0],) == y1.shape
+        assert (X1.shape[0],) == y2.shape
 
 
 def test_fetch_filter():
@@ -73,14 +73,14 @@ def test_fetch_filter():
                                random_state=37)
 
     X1, X2 = datasets1['ecoli'].data, datasets2['ecoli'].data
-    assert_equal(DATASET_SHAPE['ecoli'], X1.shape)
-    assert_equal(X1.shape, X2.shape)
+    assert DATASET_SHAPE['ecoli'] == X1.shape
+    assert X1.shape == X2.shape
 
     assert_allclose(X1.sum(), X2.sum())
 
     y1, y2 = datasets1['ecoli'].target, datasets2['ecoli'].target
-    assert_equal((X1.shape[0],), y1.shape)
-    assert_equal((X1.shape[0],), y2.shape)
+    assert (X1.shape[0],) == y1.shape
+    assert (X1.shape[0],) == y2.shape
 
 
 def test_fetch_error():

imblearn/ensemble/tests/test_balance_cascade.py

+2 -2

@@ -6,8 +6,8 @@
 from __future__ import print_function
 
 import numpy as np
-from sklearn.utils.testing import (assert_array_equal, assert_raises,
-                                   assert_raises_regex)
+from sklearn.utils.testing import assert_array_equal, assert_raises
+from sklearn.utils.testing import assert_raises_regex
 from sklearn.ensemble import RandomForestClassifier
 
 from imblearn.ensemble import BalanceCascade

imblearn/ensemble/tests/test_easy_ensemble.py

+5 -5

@@ -6,7 +6,7 @@
 from __future__ import print_function
 
 import numpy as np
-from sklearn.utils.testing import assert_array_equal, assert_equal
+from sklearn.utils.testing import assert_array_equal
 
 from imblearn.ensemble import EasyEnsemble
 
@@ -25,10 +25,10 @@ def test_ee_init():
     ratio = 1.
     ee = EasyEnsemble(ratio=ratio, random_state=RND_SEED)
 
-    assert_equal(ee.ratio, ratio)
-    assert_equal(ee.replacement, False)
-    assert_equal(ee.n_subsets, 10)
-    assert_equal(ee.random_state, RND_SEED)
+    assert ee.ratio == ratio
+    assert not ee.replacement
+    assert ee.n_subsets == 10
+    assert ee.random_state == RND_SEED
 
 
 def test_fit_sample_auto():

imblearn/metrics/tests/test_classification.py

+26 -28

@@ -16,15 +16,14 @@
 from sklearn.preprocessing import label_binarize
 from sklearn.utils.fixes import np_version
 from sklearn.utils.validation import check_random_state
-from sklearn.utils.testing import (assert_allclose, assert_array_equal,
-                                   assert_no_warnings, assert_equal,
-                                   assert_raises, assert_warns_message,
-                                   ignore_warnings, assert_not_equal,
-                                   assert_raise_message)
-from sklearn.metrics import (accuracy_score, average_precision_score,
-                             brier_score_loss, cohen_kappa_score,
-                             jaccard_similarity_score, precision_score,
-                             recall_score, roc_auc_score)
+from sklearn.utils.testing import assert_allclose, assert_array_equal
+from sklearn.utils.testing import assert_no_warnings, assert_raises
+from sklearn.utils.testing import assert_warns_message, ignore_warnings
+from sklearn.utils.testing import assert_raise_message
+from sklearn.metrics import accuracy_score, average_precision_score
+from sklearn.metrics import brier_score_loss, cohen_kappa_score
+from sklearn.metrics import jaccard_similarity_score, precision_score
+from sklearn.metrics import recall_score, roc_auc_score
 
 from imblearn.metrics import sensitivity_specificity_support
 from imblearn.metrics import sensitivity_score
@@ -113,11 +112,11 @@ def test_sensitivity_specificity_score_binary():
 
 def test_sensitivity_specificity_f_binary_single_class():
     # Such a case may occur with non-stratified cross-validation
-    assert_equal(1., sensitivity_score([1, 1], [1, 1]))
-    assert_equal(0., specificity_score([1, 1], [1, 1]))
+    assert sensitivity_score([1, 1], [1, 1]) == 1.
+    assert specificity_score([1, 1], [1, 1]) == 0.
 
-    assert_equal(0., sensitivity_score([-1, -1], [-1, -1]))
-    assert_equal(0., specificity_score([-1, -1], [-1, -1]))
+    assert sensitivity_score([-1, -1], [-1, -1]) == 0.
+    assert specificity_score([-1, -1], [-1, -1]) == 0.
 
 
 @ignore_warnings
@@ -166,9 +165,8 @@ def test_sensitivity_specificity_ignored_labels():
                     rtol=R_TOL)
 
     # ensure the above were meaningful tests:
-    for average in ['macro', 'weighted', 'micro']:
-        assert_not_equal(
-            specificity_13(average=average), specificity_all(average=average))
+    for each in ['macro', 'weighted', 'micro']:
+        assert specificity_13(average=each) != specificity_all(average=each)
 
 
 def test_sensitivity_specificity_error_multilabels():
@@ -333,15 +331,15 @@ def test_classification_report_imbalanced_multiclass():
         y_pred,
         labels=np.arange(len(iris.target_names)),
         target_names=iris.target_names)
-    assert_equal(_format_report(report), expected_report)
+    assert _format_report(report) == expected_report
     # print classification report with label detection
     expected_report = ('pre rec spe f1 geo iba sup 0 0.83 0.79 0.92 0.81 '
                        '0.86 0.74 24 1 0.33 0.10 0.86 0.15 0.44 0.19 31 2 '
                        '0.42 0.90 0.55 0.57 0.63 0.37 20 avg / total 0.51 '
                        '0.53 0.80 0.47 0.62 0.41 75')
 
     report = classification_report_imbalanced(y_true, y_pred)
-    assert_equal(_format_report(report), expected_report)
+    assert _format_report(report) == expected_report
 
 
 def test_classification_report_imbalanced_multiclass_with_digits():
@@ -361,14 +359,14 @@ def test_classification_report_imbalanced_multiclass_with_digits():
         labels=np.arange(len(iris.target_names)),
         target_names=iris.target_names,
         digits=5)
-    assert_equal(_format_report(report), expected_report)
+    assert _format_report(report) == expected_report
     # print classification report with label detection
     expected_report = ('pre rec spe f1 geo iba sup 0 0.83 0.79 0.92 0.81 '
                        '0.86 0.74 24 1 0.33 0.10 0.86 0.15 0.44 0.19 31 2 '
                        '0.42 0.90 0.55 0.57 0.63 0.37 20 avg / total 0.51 '
                        '0.53 0.80 0.47 0.62 0.41 75')
     report = classification_report_imbalanced(y_true, y_pred)
-    assert_equal(_format_report(report), expected_report)
+    assert _format_report(report) == expected_report
 
 
 def test_classification_report_imbalanced_multiclass_with_string_label():
@@ -382,15 +380,15 @@ def test_classification_report_imbalanced_multiclass_with_string_label():
                        '0.19 31 red 0.42 0.90 0.55 0.57 0.63 0.37 20 '
                        'avg / total 0.51 0.53 0.80 0.47 0.62 0.41 75')
     report = classification_report_imbalanced(y_true, y_pred)
-    assert_equal(_format_report(report), expected_report)
+    assert _format_report(report) == expected_report
 
     expected_report = ('pre rec spe f1 geo iba sup a 0.83 0.79 0.92 0.81 '
                        '0.86 0.74 24 b 0.33 0.10 0.86 0.15 0.44 0.19 31 '
                        'c 0.42 0.90 0.55 0.57 0.63 0.37 20 avg / total '
                        '0.51 0.53 0.80 0.47 0.62 0.41 75')
     report = classification_report_imbalanced(
         y_true, y_pred, target_names=["a", "b", "c"])
-    assert_equal(_format_report(report), expected_report)
+    assert _format_report(report) == expected_report
 
 
 def test_classification_report_imbalanced_multiclass_with_unicode_label():
@@ -411,7 +409,7 @@ def test_classification_report_imbalanced_multiclass_with_unicode_label():
                              classification_report_imbalanced, y_true, y_pred)
     else:
         report = classification_report_imbalanced(y_true, y_pred)
-        assert_equal(_format_report(report), expected_report)
+        assert _format_report(report) == expected_report
 
 
 def test_classification_report_imbalanced_multiclass_with_long_string_label():
@@ -427,7 +425,7 @@ def test_classification_report_imbalanced_multiclass_with_long_string_label():
                        '0.37 20 avg / total 0.51 0.53 0.80 0.47 0.62 0.41 75')
 
     report = classification_report_imbalanced(y_true, y_pred)
-    assert_equal(_format_report(report), expected_report)
+    assert _format_report(report) == expected_report
 
 
 def test_iba_sklearn_metrics():
@@ -436,22 +434,22 @@ def test_iba_sklearn_metrics():
     acc = make_index_balanced_accuracy(alpha=0.5, squared=True)(
         accuracy_score)
     score = acc(y_true, y_pred)
-    assert_equal(score, 0.54756)
+    assert score == 0.54756
 
     jss = make_index_balanced_accuracy(alpha=0.5, squared=True)(
         jaccard_similarity_score)
     score = jss(y_true, y_pred)
-    assert_equal(score, 0.54756)
+    assert score == 0.54756
 
     pre = make_index_balanced_accuracy(alpha=0.5, squared=True)(
         precision_score)
    score = pre(y_true, y_pred)
-    assert_equal(score, 0.65025)
+    assert score == 0.65025
 
     rec = make_index_balanced_accuracy(alpha=0.5, squared=True)(
         recall_score)
     score = rec(y_true, y_pred)
-    assert_equal(score, 0.41616000000000009)
+    assert score == 0.41616000000000009
 
 
 def test_iba_error_y_score_prob():

imblearn/over_sampling/tests/test_adasyn.py

+4 -4

@@ -6,8 +6,8 @@
 from __future__ import print_function
 
 import numpy as np
-from sklearn.utils.testing import (assert_allclose, assert_array_equal,
-                                   assert_equal, assert_raises_regex)
+from sklearn.utils.testing import assert_allclose, assert_array_equal
+from sklearn.utils.testing import assert_raises_regex
 from sklearn.neighbors import NearestNeighbors
 
 from imblearn.over_sampling import ADASYN
@@ -30,13 +30,13 @@
 def test_ada_init():
     ratio = 'auto'
     ada = ADASYN(ratio=ratio, random_state=RND_SEED)
-    assert_equal(ada.random_state, RND_SEED)
+    assert ada.random_state == RND_SEED
 
 
 def test_ada_fit():
     ada = ADASYN(random_state=RND_SEED)
     ada.fit(X, Y)
-    assert_equal(ada.ratio_, {0: 4, 1: 0})
+    assert ada.ratio_ == {0: 4, 1: 0}
 
 
 def test_ada_fit_sample():

imblearn/over_sampling/tests/test_random_over_sampler.py

+5 -5

@@ -8,7 +8,7 @@
 from collections import Counter
 
 import numpy as np
-from sklearn.utils.testing import assert_array_equal, assert_equal
+from sklearn.utils.testing import assert_array_equal
 
 from imblearn.over_sampling import RandomOverSampler
 
@@ -24,7 +24,7 @@
 def test_ros_init():
     ratio = 'auto'
     ros = RandomOverSampler(ratio=ratio, random_state=RND_SEED)
-    assert_equal(ros.random_state, RND_SEED)
+    assert ros.random_state == RND_SEED
 
 
 def test_ros_fit_sample():
@@ -75,6 +75,6 @@ def test_multiclass_fit_sample():
     ros = RandomOverSampler(random_state=RND_SEED)
     X_resampled, y_resampled = ros.fit_sample(X, y)
     count_y_res = Counter(y_resampled)
-    assert_equal(count_y_res[0], 5)
-    assert_equal(count_y_res[1], 5)
-    assert_equal(count_y_res[2], 5)
+    assert count_y_res[0] == 5
+    assert count_y_res[1] == 5
+    assert count_y_res[2] == 5

imblearn/over_sampling/tests/test_smote.py

+2 -2

@@ -6,8 +6,8 @@
 from __future__ import print_function
 
 import numpy as np
-from sklearn.utils.testing import (assert_allclose, assert_array_equal,
-                                   assert_raises_regex)
+from sklearn.utils.testing import assert_allclose, assert_array_equal
+from sklearn.utils.testing import assert_raises_regex
 from sklearn.neighbors import NearestNeighbors
 from sklearn.svm import SVC
 

imblearn/tests/test_common.py

+2 -4

@@ -3,8 +3,6 @@
 # Christos Aridas
 # License: MIT
 
-from sklearn.utils.testing import assert_greater
-from sklearn.utils.testing import assert_false
 from sklearn.utils.testing import _named_check
 
 from imblearn.utils.estimator_checks import check_estimator, _yield_all_checks
@@ -16,12 +14,12 @@ def test_all_estimator_no_base_class():
     for name, Estimator in all_estimators():
         msg = ("Base estimators such as {0} should not be included"
                " in all_estimators").format(name)
-        assert_false(name.lower().startswith('base'), msg=msg)
+        assert not name.lower().startswith('base'), msg
 
 
 def test_all_estimators():
     estimators = all_estimators(include_meta_estimators=True)
-    assert_greater(len(estimators), 0)
+    assert len(estimators) > 0
     for name, Estimator in estimators:
         # some can just not be sensibly default constructed
         yield (_named_check(check_estimator, name),
