Skip to content

Commit 7d7cb68

Browse files
committed
pre-release
1 parent c1cdec1 commit 7d7cb68

File tree

7 files changed: +70 additions, −75 deletions

CHANGELOG.md

Lines changed: 19 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,25 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
88

99
### Added
1010

11+
-
12+
13+
### Changed
14+
15+
-
16+
17+
### Removed
18+
19+
-
20+
21+
### Fixed
22+
23+
-
24+
25+
26+
## [21.07] - 2021-07-29
27+
28+
### Added
29+
1130
- added `pre-commit` hook to run codestyle checker on commit ([#1257](https://github.com/catalyst-team/catalyst/pull/1257))
1231
- `on publish` github action for docker and docs added ([#1260](https://github.com/catalyst-team/catalyst/pull/1260))
1332
- MixupCallback and `utils.mixup_batch` ([#1241](https://github.com/catalyst-team/catalyst/pull/1241))

CONTRIBUTING.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -109,7 +109,7 @@ We are using the Github CI for our test cases validation:
109109
- [unit tests](https://github.com/catalyst-team/catalyst/blob/master/.github/workflows/dl_cpu.yml#L113)
110110
- [integrations tests](https://github.com/catalyst-team/catalyst/blob/master/.github/workflows/dl_cpu.yml#L114#L117)
111111

112-
We also have a [colab minimal CI/CD](https://colab.research.google.com/drive/1JCGTVvWlrIsLXMPRRRSWiAstSLic4nbA) as an independent step-by-step handmade tests option.
112+
We also have a [colab minimal CI/CD](https://colab.research.google.com/github/catalyst-team/catalyst/blob/master/examples/notebooks/colab_ci_cd.ipynb) as an independent step-by-step handmade tests option.
113113
Please use it as a collaborative platform, if you have any issues during the PR.
114114

115115
### Codestyle

README.md

Lines changed: 39 additions & 59 deletions
Original file line numberDiff line numberDiff line change
@@ -71,13 +71,11 @@ model = nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, 10))
7171
criterion = nn.CrossEntropyLoss()
7272
optimizer = optim.Adam(model.parameters(), lr=0.02)
7373

74+
train_data = MNIST(os.getcwd(), train=True, download=True, transform=ToTensor())
75+
valid_data = MNIST(os.getcwd(), train=False, download=True, transform=ToTensor())
7476
loaders = {
75-
"train": DataLoader(
76-
MNIST(os.getcwd(), train=True, download=True, transform=ToTensor()), batch_size=32
77-
),
78-
"valid": DataLoader(
79-
MNIST(os.getcwd(), train=False, download=True, transform=ToTensor()), batch_size=32
80-
),
77+
"train": DataLoader(train_data, batch_size=32),
78+
"valid": DataLoader(valid_data, batch_size=32),
8179
}
8280

8381
runner = dl.SupervisedRunner(
@@ -220,13 +218,11 @@ from catalyst.contrib.datasets import MNIST
220218
model = nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, 10))
221219
optimizer = optim.Adam(model.parameters(), lr=0.02)
222220

221+
train_data = MNIST(os.getcwd(), train=True, download=True, transform=ToTensor())
222+
valid_data = MNIST(os.getcwd(), train=False, download=True, transform=ToTensor())
223223
loaders = {
224-
"train": DataLoader(
225-
MNIST(os.getcwd(), train=True, download=True, transform=ToTensor()), batch_size=32
226-
),
227-
"valid": DataLoader(
228-
MNIST(os.getcwd(), train=False, download=True, transform=ToTensor()), batch_size=32
229-
),
224+
"train": DataLoader(train_data, batch_size=32),
225+
"valid": DataLoader(valid_data, batch_size=32),
230226
}
231227

232228
class CustomRunner(dl.Runner):
@@ -626,13 +622,11 @@ model = nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, 10))
626622
criterion = nn.CrossEntropyLoss()
627623
optimizer = optim.Adam(model.parameters(), lr=0.02)
628624

625+
train_data = MNIST(os.getcwd(), train=True, download=True, transform=ToTensor())
626+
valid_data = MNIST(os.getcwd(), train=False, download=True, transform=ToTensor())
629627
loaders = {
630-
"train": DataLoader(
631-
MNIST(os.getcwd(), train=True, download=True, transform=ToTensor()), batch_size=32
632-
),
633-
"valid": DataLoader(
634-
MNIST(os.getcwd(), train=False, download=True, transform=ToTensor()), batch_size=32
635-
),
628+
"train": DataLoader(train_data, batch_size=32),
629+
"valid": DataLoader(valid_data, batch_size=32),
636630
}
637631

638632
runner = dl.SupervisedRunner()
@@ -688,13 +682,11 @@ model = nn.Sequential(
688682
criterion = IoULoss()
689683
optimizer = torch.optim.Adam(model.parameters(), lr=0.02)
690684

685+
train_data = MNIST(os.getcwd(), train=True, download=True, transform=ToTensor())
686+
valid_data = MNIST(os.getcwd(), train=False, download=True, transform=ToTensor())
691687
loaders = {
692-
"train": DataLoader(
693-
MNIST(os.getcwd(), train=True, download=True, transform=ToTensor()), batch_size=32
694-
),
695-
"valid": DataLoader(
696-
MNIST(os.getcwd(), train=False, download=True, transform=ToTensor()), batch_size=32
697-
),
688+
"train": DataLoader(train_data, batch_size=32),
689+
"valid": DataLoader(valid_data, batch_size=32),
698690
}
699691

700692
class CustomRunner(dl.SupervisedRunner):
@@ -750,13 +742,11 @@ student = nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, 10))
750742
criterion = {"cls": nn.CrossEntropyLoss(), "kl": nn.KLDivLoss(reduction="batchmean")}
751743
optimizer = optim.Adam(student.parameters(), lr=0.02)
752744

745+
train_data = MNIST(os.getcwd(), train=True, download=True, transform=ToTensor())
746+
valid_data = MNIST(os.getcwd(), train=False, download=True, transform=ToTensor())
753747
loaders = {
754-
"train": DataLoader(
755-
MNIST(os.getcwd(), train=True, download=True, transform=ToTensor()), batch_size=32
756-
),
757-
"valid": DataLoader(
758-
MNIST(os.getcwd(), train=False, download=True, transform=ToTensor()), batch_size=32
759-
),
748+
"train": DataLoader(train_data, batch_size=32),
749+
"valid": DataLoader(valid_data, batch_size=32),
760750
}
761751

762752
class DistilRunner(dl.Runner):
@@ -934,11 +924,8 @@ optimizer = {
934924
"generator": torch.optim.Adam(generator.parameters(), lr=0.0003, betas=(0.5, 0.999)),
935925
"discriminator": torch.optim.Adam(discriminator.parameters(), lr=0.0003, betas=(0.5, 0.999)),
936926
}
937-
loaders = {
938-
"train": DataLoader(
939-
MNIST(os.getcwd(), train=False, download=True, transform=ToTensor()), batch_size=32
940-
)
941-
}
927+
train_data = MNIST(os.getcwd(), train=False, download=True, transform=ToTensor())
928+
loaders = {"train": DataLoader(train_data, batch_size=32)}
942929

943930
class CustomRunner(dl.Runner):
944931
def predict_batch(self, batch):
@@ -1099,13 +1086,11 @@ class CustomRunner(dl.IRunner):
10991086
return 3
11001087

11011088
def get_loaders(self, stage: str):
1089+
train_data = MNIST(os.getcwd(), train=True, download=True, transform=ToTensor())
1090+
valid_data = MNIST(os.getcwd(), train=False, download=True, transform=ToTensor())
11021091
loaders = {
1103-
"train": DataLoader(
1104-
MNIST(os.getcwd(), train=True, download=True, transform=ToTensor()), batch_size=32
1105-
),
1106-
"valid": DataLoader(
1107-
MNIST(os.getcwd(), train=False, download=True, transform=ToTensor()), batch_size=32
1108-
),
1092+
"train": DataLoader(train_data, batch_size=32),
1093+
"valid": DataLoader(valid_data, batch_size=32),
11091094
}
11101095
return loaders
11111096

@@ -1202,13 +1187,11 @@ class CustomRunner(dl.IRunner):
12021187
return 3
12031188

12041189
def get_loaders(self, stage: str):
1190+
train_data = MNIST(os.getcwd(), train=True, download=True, transform=ToTensor())
1191+
valid_data = MNIST(os.getcwd(), train=False, download=True, transform=ToTensor())
12051192
loaders = {
1206-
"train": DataLoader(
1207-
MNIST(os.getcwd(), train=True, download=True, transform=ToTensor()), batch_size=32
1208-
),
1209-
"valid": DataLoader(
1210-
MNIST(os.getcwd(), train=False, download=True, transform=ToTensor()), batch_size=32
1211-
),
1193+
"train": DataLoader(train_data, batch_size=32),
1194+
"valid": DataLoader(valid_data, batch_size=32),
12121195
}
12131196
return loaders
12141197

@@ -1311,13 +1294,11 @@ class CustomRunner(dl.IRunner):
13111294
return 3
13121295

13131296
def get_loaders(self, stage: str):
1297+
train_data = MNIST(os.getcwd(), train=True, download=True, transform=ToTensor())
1298+
valid_data = MNIST(os.getcwd(), train=False, download=True, transform=ToTensor())
13141299
loaders = {
1315-
"train": DataLoader(
1316-
MNIST(os.getcwd(), train=True, download=True, transform=ToTensor()), batch_size=32
1317-
),
1318-
"valid": DataLoader(
1319-
MNIST(os.getcwd(), train=False, download=True, transform=ToTensor()), batch_size=32
1320-
),
1300+
"train": DataLoader(train_data, batch_size=32),
1301+
"valid": DataLoader(valid_data, batch_size=32),
13211302
}
13221303
return loaders
13231304

@@ -1409,13 +1390,11 @@ def objective(trial):
14091390
lr = trial.suggest_loguniform("lr", 1e-3, 1e-1)
14101391
num_hidden = int(trial.suggest_loguniform("num_hidden", 32, 128))
14111392

1393+
train_data = MNIST(os.getcwd(), train=True, download=True, transform=ToTensor())
1394+
valid_data = MNIST(os.getcwd(), train=False, download=True, transform=ToTensor())
14121395
loaders = {
1413-
"train": DataLoader(
1414-
MNIST(os.getcwd(), train=True, download=True, transform=ToTensor()), batch_size=32
1415-
),
1416-
"valid": DataLoader(
1417-
MNIST(os.getcwd(), train=False, download=True, transform=ToTensor()), batch_size=32
1418-
),
1396+
"train": DataLoader(train_data, batch_size=32),
1397+
"valid": DataLoader(valid_data, batch_size=32),
14191398
}
14201399
model = nn.Sequential(
14211400
nn.Flatten(), nn.Linear(784, num_hidden), nn.ReLU(), nn.Linear(num_hidden, 10)
@@ -1582,6 +1561,7 @@ best practices for your deep learning research and development.
15821561

15831562
### Documentation
15841563
- [master](https://catalyst-team.github.io/catalyst/)
1564+
- [21.07](https://catalyst-team.github.io/catalyst/v21.07/index.html)
15851565
- [21.06](https://catalyst-team.github.io/catalyst/v21.06/index.html)
15861566
- [21.05](https://catalyst-team.github.io/catalyst/v21.05/index.html) ([Catalyst — A PyTorch Framework for Accelerated Deep Learning R&D](https://medium.com/pytorch/catalyst-a-pytorch-framework-for-accelerated-deep-learning-r-d-ad9621e4ca88?source=friends_link&sk=885b4409aecab505db0a63b06f19dcef))
15871567
- [21.04/21.04.1](https://catalyst-team.github.io/catalyst/v21.04/index.html), [21.04.2](https://catalyst-team.github.io/catalyst/v21.04.2/index.html)

catalyst/__version__.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1 +1 @@
1-
__version__ = "21.06"
1+
__version__ = "21.07"

docs/index.rst

Lines changed: 4 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -46,13 +46,11 @@ Getting started
4646
criterion = nn.CrossEntropyLoss()
4747
optimizer = optim.Adam(model.parameters(), lr=0.02)
4848
49+
train_data = MNIST(os.getcwd(), train=True, download=True, transform=ToTensor())
50+
valid_data = MNIST(os.getcwd(), train=False, download=True, transform=ToTensor())
4951
loaders = {
50-
"train": DataLoader(
51-
MNIST(os.getcwd(), train=True, download=True, transform=ToTensor()), batch_size=32
52-
),
53-
"valid": DataLoader(
54-
MNIST(os.getcwd(), train=False, download=True, transform=ToTensor()), batch_size=32
55-
),
52+
"train": DataLoader(train_data, batch_size=32),
53+
"valid": DataLoader(valid_data, batch_size=32),
5654
}
5755
5856
runner = dl.SupervisedRunner(

examples/engines/README.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,12 +2,12 @@
22

33
Let's check different
44
DataParallel and DistributedDataParallel multi-GPU setups with Catalyst Engines.
5-
> *Please use `pip install git+https://github.com/catalyst-team/catalyst@master --upgrade` before the `v21.06` release.*
65

76

87
## PyTorch
98
```bash
109
pip install catalyst
10+
CUDA_VISIBLE_DEVICES="0" python multi_gpu.py --engine=de
1111
CUDA_VISIBLE_DEVICES="0,1" python multi_gpu.py --engine=dp
1212
CUDA_VISIBLE_DEVICES="0,1" python multi_gpu.py --engine=ddp
1313
```

examples/engines/multi_gpu.py

Lines changed: 5 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -13,6 +13,7 @@
1313
from catalyst.data import transforms
1414

1515
E2E = {
16+
"de": dl.DeviceEngine,
1617
"dp": dl.DataParallelEngine,
1718
"ddp": dl.DistributedDataParallelEngine,
1819
}
@@ -97,14 +98,11 @@ def get_loaders(self, stage: str):
9798
transform = transforms.Compose(
9899
[transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
99100
)
101+
train_data = CIFAR10(os.getcwd(), train=True, download=True, transform=transform)
102+
valid_data = CIFAR10(os.getcwd(), train=False, download=True, transform=transform)
100103
return {
101-
"train": DataLoader(
102-
CIFAR10(os.getcwd(), train=True, download=True, transform=transform), batch_size=32
103-
),
104-
"valid": DataLoader(
105-
CIFAR10(os.getcwd(), train=False, download=True, transform=transform),
106-
batch_size=32,
107-
),
104+
"train": DataLoader(train_data, batch_size=32),
105+
"valid": DataLoader(valid_data, batch_size=32),
108106
}
109107

110108
def get_model(self, stage: str):

Comments (0)