
Commit ccebe7c

separating legacy tests (#1285)

1 parent 90fd332, commit ccebe7c

File tree

2 files changed: 32 additions, 31 deletions

  test/data/test_builtin_datasets.py
  test/legacy/data/test_dataset.py

test/data/test_builtin_datasets.py

Lines changed: 0 additions & 31 deletions
@@ -5,7 +5,6 @@
 import torchtext
 import json
 import hashlib
-from torchtext.legacy import data
 from parameterized import parameterized
 from ..common.torchtext_test_case import TorchtextTestCase
 from ..common.parameterized_utils import load_params
@@ -35,23 +34,6 @@ def _helper_test_func(self, length, target_length, results, target_results):
         target_results = tuple(torch.tensor(item, dtype=torch.int64) for item in target_results)
         self.assertEqual(results, target_results)

-    def test_wikitext2_legacy(self):
-        from torchtext.legacy.datasets import WikiText2
-        cachedir = os.path.join(self.project_root, ".data", "wikitext-2")
-        conditional_remove(cachedir)
-
-        ds = WikiText2
-        TEXT = data.Field(lower=True, batch_first=True)
-        train, valid, test = ds.splits(TEXT)
-        TEXT.build_vocab(train)
-        train_iter, valid_iter, test_iter = data.BPTTIterator.splits(
-            (train, valid, test), batch_size=3, bptt_len=30)
-
-        train_iter, valid_iter, test_iter = ds.iters(batch_size=4,
-                                                     bptt_len=30)
-
-        conditional_remove(cachedir)
-
     def test_wikitext2(self):
         from torchtext.experimental.datasets import WikiText2
         cachedir = os.path.join(self.project_root, ".data", "wikitext-2")
@@ -91,19 +73,6 @@ def test_wikitext2(self):
         conditional_remove(cachedir)
         conditional_remove(cachefile)

-    def test_penntreebank_legacy(self):
-        from torchtext.legacy.datasets import PennTreebank
-        # smoke test to ensure penn treebank works properly
-        TEXT = data.Field(lower=True, batch_first=True)
-        ds = PennTreebank
-        train, valid, test = ds.splits(TEXT)
-        TEXT.build_vocab(train)
-        train_iter, valid_iter, test_iter = data.BPTTIterator.splits(
-            (train, valid, test), batch_size=3, bptt_len=30)
-
-        train_iter, valid_iter, test_iter = ds.iters(batch_size=4,
-                                                     bptt_len=30)
-
     def test_penntreebank(self):
         from torchtext.experimental.datasets import PennTreebank
         # smoke test to ensure penn treebank works properly
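For context, the tests removed here (and relocated below) exercise torchtext's legacy field/iterator pipeline. A minimal, commented sketch of that flow, assuming a torchtext version that still ships the torchtext.legacy namespace:

from torchtext.legacy import data
from torchtext.legacy.datasets import WikiText2

# A Field describes how raw text is tokenized and numericalized
# (here: lowercased, with batch-major tensors).
TEXT = data.Field(lower=True, batch_first=True)

# splits() downloads the dataset if needed and returns train/valid/test splits.
train, valid, test = WikiText2.splits(TEXT)

# The vocabulary is built from the training split only.
TEXT.build_vocab(train)

# BPTTIterator yields contiguous text chunks of length bptt_len for language modeling.
train_iter, valid_iter, test_iter = data.BPTTIterator.splits(
    (train, valid, test), batch_size=3, bptt_len=30)

The test_wikitext2 and test_penntreebank tests that remain in this file use the newer torchtext.experimental.datasets API instead.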

test/legacy/data/test_dataset.py

Lines changed: 32 additions & 0 deletions
@@ -8,9 +8,41 @@
 import pytest

 from ...common.torchtext_test_case import TorchtextTestCase
+from ...common.assets import conditional_remove


 class TestDataset(TorchtextTestCase):
+
+    def test_wikitext2_legacy(self):
+        from torchtext.legacy.datasets import WikiText2
+        cachedir = os.path.join(self.project_root, ".data", "wikitext-2")
+        conditional_remove(cachedir)
+
+        ds = WikiText2
+        TEXT = data.Field(lower=True, batch_first=True)
+        train, valid, test = ds.splits(TEXT)
+        TEXT.build_vocab(train)
+        train_iter, valid_iter, test_iter = data.BPTTIterator.splits(
+            (train, valid, test), batch_size=3, bptt_len=30)
+
+        train_iter, valid_iter, test_iter = ds.iters(batch_size=4,
+                                                     bptt_len=30)
+
+        conditional_remove(cachedir)
+
+    def test_penntreebank_legacy(self):
+        from torchtext.legacy.datasets import PennTreebank
+        # smoke test to ensure penn treebank works properly
+        TEXT = data.Field(lower=True, batch_first=True)
+        ds = PennTreebank
+        train, valid, test = ds.splits(TEXT)
+        TEXT.build_vocab(train)
+        train_iter, valid_iter, test_iter = data.BPTTIterator.splits(
+            (train, valid, test), batch_size=3, bptt_len=30)
+
+        train_iter, valid_iter, test_iter = ds.iters(batch_size=4,
+                                                     bptt_len=30)
+
     def test_tabular_simple_data(self):
         for data_format in ["csv", "tsv", "json"]:
             self.write_test_ppid_dataset(data_format=data_format)
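With the legacy smoke tests now living under test/legacy/, they can be run independently of the builtin-dataset tests. A minimal sketch using pytest's Python entry point, assuming the repository root is the working directory:

import pytest

# Run only the relocated legacy dataset tests; an exit code of 0 means all passed.
exit_code = pytest.main(["test/legacy/data/test_dataset.py", "-v"])

The same selection works from the command line by passing that file path directly to pytest.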

0 commit comments