
Commit 146ca82

- Added pyproject.toml to set the maximum line length to 120
- Reformatted the code with black at line length 120
- Made black run on dist-tests via pnpm dist_lint

1 parent 9c65257 commit 146ca82

20 files changed: +77 −126 lines


CONTRIBUTING.md (+6)

````diff
@@ -122,6 +122,12 @@ To add a new template,
   bash scripts/run_code_style.sh source_lint
   ```
 
+- To check if generated code complies with a style guide:
+
+  ```sh
+  pnpm test && sh ./scripts/run_tests.sh unzip && pnpm dist_lint
+  ```
+
 _NOTE: Even if you have a half-completed/working PR, sending a PR is still a valid contribution and we can help you finish the PR._
 
 **NOTE : When sending a PR, please kindly check if the changes are required to run in the CI.**
````
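What the new check does, stage by stage — a sketch; the per-step comments are inferred from the scripts touched in this commit, not from documented behavior:

```sh
pnpm test                         # run the template tests, producing zipped code archives
sh ./scripts/run_tests.sh unzip   # unzip the generated code into ./dist-tests/
pnpm dist_lint                    # run scripts/run_code_style.sh dist_lint on the unzipped code
```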

pyproject.toml (+23)

```diff
@@ -0,0 +1,23 @@
+[tool.black]
+line-length = 120
+target-version = ['py38', 'py39']
+include = '\.pyi?$'
+exclude = '''
+
+(
+  /(
+      \.eggs  # exclude a few common directories in the
+    | \.git   # root of the project
+    | \.hg
+    | \.mypy_cache
+    | \.tox
+    | \.venv
+    | _build
+    | buck-out
+    | build
+    | dist
+  )/
+  | foo.py  # also separately exclude a file named foo.py in
+            # the root of the project
+)
+'''
```
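With this configuration in place, black reads the line length, target versions, and excludes from pyproject.toml automatically, so no extra flags are needed; a minimal local check (assuming black is installed, e.g. pip install black) would be:

```sh
# Show which files would be reformatted and the exact changes,
# without touching anything; exits non-zero if reformatting is needed.
black --check --diff .
```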

scripts/run_code_style.sh (+4 −2)

```diff
@@ -6,11 +6,13 @@ if [ $1 == "dist_lint" ]; then
     # Check that ./dist-tests/ exists and code is unzipped
     ls ./dist-tests/vision-classification-all/main.py
     # Comment dist-tests in .gitignore to make black running on ./dist-tests folder
-    # TODO:
+    sed -i "s/dist-tests/# dist-tests/g" .gitignore
+
     ufmt diff .
     flake8 --select F401,F821 ./dist-tests # find unused imports and non imported objects
+
     # Restore .gitignore
-    # TODO:
+    sed -i "s/\([# ]\+\)dist-tests/dist-tests/g" .gitignore
 elif [ $1 == "source_lint" ]; then
     ufmt diff .
 elif [ $1 == "fmt" ]; then
```
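The sed pair above toggles the dist-tests entry in .gitignore so the linters (which skip git-ignored paths) descend into ./dist-tests, then restores it afterwards. A standalone demonstration on a hypothetical one-line .gitignore, assuming GNU sed for in-place -i:

```sh
printf 'dist-tests\n' > .gitignore                       # hypothetical minimal .gitignore
sed -i "s/dist-tests/# dist-tests/g" .gitignore          # entry becomes "# dist-tests": no longer ignored
sed -i "s/\([# ]\+\)dist-tests/dist-tests/g" .gitignore  # strip the "# " prefix, restoring "dist-tests"
```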

src/templates/template-common/utils.py (+8 −26)

```diff
@@ -16,11 +16,7 @@
 
 #::: } :::#
 #::: if (it.save_training || it.save_evaluation) { :::#
-from ignite.handlers import (
-    Checkpoint,
-    DiskSaver,
-    global_step_from_engine,
-)  # usort: skip
+from ignite.handlers import Checkpoint, DiskSaver, global_step_from_engine  # usort: skip
 
 #::: } else { :::#
 from ignite.handlers import Checkpoint
@@ -84,9 +80,7 @@ def log_metrics(engine: Engine, tag: str) -> None:
     tag
         a string to add at the start of output.
     """
-    metrics_format = "{0} [{1}/{2}]: {3}".format(
-        tag, engine.state.epoch, engine.state.iteration, engine.state.metrics
-    )
+    metrics_format = "{0} [{1}/{2}]: {3}".format(tag, engine.state.epoch, engine.state.iteration, engine.state.metrics)
     engine.logger.info(metrics_format)
 
 
@@ -175,21 +169,13 @@ def setup_exp_logging(config, trainer, optimizers, evaluators):
     """Setup Experiment Tracking logger from Ignite."""
 
     #::: if (it.logger === 'clearml') { :::#
-    logger = common.setup_clearml_logging(
-        trainer, optimizers, evaluators, config.log_every_iters
-    )
+    logger = common.setup_clearml_logging(trainer, optimizers, evaluators, config.log_every_iters)
     #::: } else if (it.logger === 'mlflow') { :::#
-    logger = common.setup_mlflow_logging(
-        trainer, optimizers, evaluators, config.log_every_iters
-    )
+    logger = common.setup_mlflow_logging(trainer, optimizers, evaluators, config.log_every_iters)
     #::: } else if (it.logger === 'neptune') { :::#
-    logger = common.setup_neptune_logging(
-        trainer, optimizers, evaluators, config.log_every_iters
-    )
+    logger = common.setup_neptune_logging(trainer, optimizers, evaluators, config.log_every_iters)
     #::: } else if (it.logger === 'polyaxon') { :::#
-    logger = common.setup_plx_logging(
-        trainer, optimizers, evaluators, config.log_every_iters
-    )
+    logger = common.setup_plx_logging(trainer, optimizers, evaluators, config.log_every_iters)
     #::: } else if (it.logger === 'tensorboard') { :::#
     logger = common.setup_tb_logging(
         config.output_dir,
@@ -199,13 +185,9 @@ def setup_exp_logging(config, trainer, optimizers, evaluators):
         config.log_every_iters,
     )
     #::: } else if (it.logger === 'visdom') { :::#
-    logger = common.setup_visdom_logging(
-        trainer, optimizers, evaluators, config.log_every_iters
-    )
+    logger = common.setup_visdom_logging(trainer, optimizers, evaluators, config.log_every_iters)
     #::: } else if (it.logger === 'wandb') { :::#
-    logger = common.setup_wandb_logging(
-        trainer, optimizers, evaluators, config.log_every_iters
-    )
+    logger = common.setup_wandb_logging(trainer, optimizers, evaluators, config.log_every_iters)
     #::: } :::#
     return logger
 
```
src/templates/template-text-classification/data.py (+4 −13)

```diff
@@ -48,21 +48,12 @@ def setup_data(config):
     if local_rank > 0:
         idist.barrier()
     #::: } :::#
-
-    dataset_train, dataset_eval = load_dataset(
-        "imdb", split=["train", "test"], cache_dir=config.data_path
-    )
-    tokenizer = AutoTokenizer.from_pretrained(
-        config.model, cache_dir=config.tokenizer_dir, do_lower_case=True
-    )
+    dataset_train, dataset_eval = load_dataset("imdb", split=["train", "test"], cache_dir=config.data_path)
+    tokenizer = AutoTokenizer.from_pretrained(config.model, cache_dir=config.tokenizer_dir, do_lower_case=True)
     train_texts, train_labels = dataset_train["text"], dataset_train["label"]
     test_texts, test_labels = dataset_eval["text"], dataset_eval["label"]
-    dataset_train = TransformerDataset(
-        train_texts, train_labels, tokenizer, config.max_length
-    )
-    dataset_eval = TransformerDataset(
-        test_texts, test_labels, tokenizer, config.max_length
-    )
+    dataset_train = TransformerDataset(train_texts, train_labels, tokenizer, config.max_length)
+    dataset_eval = TransformerDataset(test_texts, test_labels, tokenizer, config.max_length)
     #::: if (it.use_dist) { :::#
     if local_rank == 0:
         idist.barrier()
```

src/templates/template-text-classification/main.py (+4 −12)

```diff
@@ -45,9 +45,7 @@ def run(local_rank: int, config: Any):
     )
 
     config.lr *= idist.get_world_size()
-    optimizer = idist.auto_optim(
-        optim.AdamW(model.parameters(), lr=config.lr, weight_decay=config.weight_decay)
-    )
+    optimizer = idist.auto_optim(optim.AdamW(model.parameters(), lr=config.lr, weight_decay=config.weight_decay))
     loss_fn = nn.BCEWithLogitsLoss().to(device=device)
 
     le = config.num_iters_per_epoch
@@ -56,9 +54,7 @@ def run(local_rank: int, config: Any):
         (le * config.num_warmup_epochs, config.lr),
         (le * config.max_epochs, 0.0),
     ]
-    lr_scheduler = PiecewiseLinear(
-        optimizer, param_name="lr", milestones_values=milestones_values
-    )
+    lr_scheduler = PiecewiseLinear(optimizer, param_name="lr", milestones_values=milestones_values)
 
     # setup metrics to attach to evaluator
     metrics = {
@@ -67,9 +63,7 @@ def run(local_rank: int, config: Any):
     }
 
     # trainer and evaluator
-    trainer = setup_trainer(
-        config, model, optimizer, loss_fn, device, dataloader_train.sampler
-    )
+    trainer = setup_trainer(config, model, optimizer, loss_fn, device, dataloader_train.sampler)
     evaluator = setup_evaluator(config, model, metrics, device)
 
     # setup engines logger with python logging
@@ -98,9 +92,7 @@ def run(local_rank: int, config: Any):
     #::: } else { :::#
     to_save_eval = None
     #::: } :::#
-    ckpt_handler_train, ckpt_handler_eval = setup_handlers(
-        trainer, evaluator, config, to_save_train, to_save_eval
-    )
+    ckpt_handler_train, ckpt_handler_eval = setup_handlers(trainer, evaluator, config, to_save_train, to_save_eval)
     #::: } else if (it.patience || it.terminate_on_nan || it.limit_sec) { :::#
     setup_handlers(trainer, evaluator, config)
     #::: } :::#
```

src/templates/template-text-classification/trainers.py (+6 −18)

```diff
@@ -22,15 +22,9 @@ def setup_trainer(
 
     def train_function(engine: Union[Engine, DeterministicEngine], batch: Any):
         input_ids = batch["input_ids"].to(device, non_blocking=True, dtype=torch.long)
-        attention_mask = batch["attention_mask"].to(
-            device, non_blocking=True, dtype=torch.long
-        )
-        token_type_ids = batch["token_type_ids"].to(
-            device, non_blocking=True, dtype=torch.long
-        )
-        labels = (
-            batch["label"].view(-1, 1).to(device, non_blocking=True, dtype=torch.float)
-        )
+        attention_mask = batch["attention_mask"].to(device, non_blocking=True, dtype=torch.long)
+        token_type_ids = batch["token_type_ids"].to(device, non_blocking=True, dtype=torch.long)
+        labels = batch["label"].view(-1, 1).to(device, non_blocking=True, dtype=torch.float)
 
         model.train()
 
@@ -75,15 +69,9 @@ def evalutate_function(engine: Engine, batch: Any):
         model.eval()
 
         input_ids = batch["input_ids"].to(device, non_blocking=True, dtype=torch.long)
-        attention_mask = batch["attention_mask"].to(
-            device, non_blocking=True, dtype=torch.long
-        )
-        token_type_ids = batch["token_type_ids"].to(
-            device, non_blocking=True, dtype=torch.long
-        )
-        labels = (
-            batch["label"].view(-1, 1).to(device, non_blocking=True, dtype=torch.float)
-        )
+        attention_mask = batch["attention_mask"].to(device, non_blocking=True, dtype=torch.long)
+        token_type_ids = batch["token_type_ids"].to(device, non_blocking=True, dtype=torch.long)
+        labels = batch["label"].view(-1, 1).to(device, non_blocking=True, dtype=torch.float)
 
         with autocast(enabled=config.use_amp):
             output = model(input_ids, attention_mask, token_type_ids)
```

src/templates/template-text-classification/utils.py (+1)

```diff
@@ -46,6 +46,7 @@ def setup_handlers(
     #::: } :::#
 
     #::: if (it.patience) { :::#
+
     # early stopping
     def score_fn(engine: Engine):
         return engine.state.metrics["Accuracy"]
```

src/templates/template-vision-classification/main.py (+2 −6)

```diff
@@ -43,9 +43,7 @@ def run(local_rank: int, config: Any):
     lr_scheduler = PiecewiseLinear(optimizer, "lr", milestones_values=milestones_values)
 
     # trainer and evaluator
-    trainer = setup_trainer(
-        config, model, optimizer, loss_fn, device, dataloader_train.sampler
-    )
+    trainer = setup_trainer(config, model, optimizer, loss_fn, device, dataloader_train.sampler)
     evaluator = setup_evaluator(config, model, device)
 
     # attach metrics to evaluator
@@ -84,9 +82,7 @@ def run(local_rank: int, config: Any):
     #::: } else { :::#
     to_save_eval = None
     #::: } :::#
-    ckpt_handler_train, ckpt_handler_eval = setup_handlers(
-        trainer, evaluator, config, to_save_train, to_save_eval
-    )
+    ckpt_handler_train, ckpt_handler_eval = setup_handlers(trainer, evaluator, config, to_save_train, to_save_eval)
     #::: } else if (it.patience || it.terminate_on_nan || it.limit_sec) { :::#
     setup_handlers(trainer, evaluator, config)
     #::: } :::#
```

src/templates/template-vision-classification/test_all.py (+1 −3)

```diff
@@ -23,9 +23,7 @@ def set_up():
 
 @pytest.mark.skipif(os.getenv("RUN_SLOW_TESTS", 0) == 0, reason="Skip slow tests")
 def test_setup_data():
-    config = Namespace(
-        data_path="~/data", train_batch_size=1, eval_batch_size=1, num_workers=0
-    )
+    config = Namespace(data_path="~/data", train_batch_size=1, eval_batch_size=1, num_workers=0)
     dataloader_train, dataloader_eval = setup_data(config)
 
     assert isinstance(dataloader_train, DataLoader)
```

src/templates/template-vision-classification/utils.py (+1)

```diff
@@ -46,6 +46,7 @@ def setup_handlers(
     #::: } :::#
 
     #::: if (it.patience) { :::#
+
     # early stopping
     def score_fn(engine: Engine):
         return -engine.state.metrics["eval_loss"]
```

src/templates/template-vision-dcgan/main.py (+3 −9)

```diff
@@ -49,12 +49,8 @@ def run(local_rank: int, config: Any):
     loss_fn = nn.BCELoss().to(device=device)
 
     # optimizers
-    optimizer_d = idist.auto_optim(
-        optim.Adam(model_d.parameters(), lr=config.lr, betas=(0.5, 0.999))
-    )
-    optimizer_g = idist.auto_optim(
-        optim.Adam(model_g.parameters(), lr=config.lr, betas=(0.5, 0.999))
-    )
+    optimizer_d = idist.auto_optim(optim.Adam(model_d.parameters(), lr=config.lr, betas=(0.5, 0.999)))
+    optimizer_g = idist.auto_optim(optim.Adam(model_g.parameters(), lr=config.lr, betas=(0.5, 0.999)))
 
     # trainer and evaluator
     trainer = setup_trainer(
@@ -100,9 +96,7 @@ def run(local_rank: int, config: Any):
     #::: } else { :::#
     to_save_train = None
     #::: } :::#
-    ckpt_handler_train, ckpt_handler_eval = setup_handlers(
-        trainer, evaluator, config, to_save_train, to_save_eval
-    )
+    ckpt_handler_train, ckpt_handler_eval = setup_handlers(trainer, evaluator, config, to_save_train, to_save_eval)
     #::: } else if (it.patience || it.terminate_on_nan || it.limit_sec) { :::#
     setup_handlers(trainer, evaluator, config)
     #::: } :::#
```

src/templates/template-vision-dcgan/test_all.py (+2 −6)

```diff
@@ -24,9 +24,7 @@ def set_up():
 
 @pytest.mark.skipif(os.getenv("RUN_SLOW_TESTS", 0) == 0, reason="Skip slow tests")
 def test_setup_data():
-    config = Namespace(
-        data_path="~/data", train_batch_size=1, eval_batch_size=1, num_workers=0
-    )
+    config = Namespace(data_path="~/data", train_batch_size=1, eval_batch_size=1, num_workers=0)
     dataloader_train, dataloader_eval, _ = setup_data(config)
 
     assert isinstance(dataloader_train, DataLoader)
@@ -61,8 +59,6 @@ def test_models():
 def test_setup_trainer():
     model, optimizer, device, loss_fn, batch = set_up()
     config = Namespace(use_amp=False, train_batch_size=2, z_dim=100)
-    trainer = setup_trainer(
-        config, model, model, optimizer, optimizer, loss_fn, device, None
-    )
+    trainer = setup_trainer(config, model, model, optimizer, optimizer, loss_fn, device, None)
     trainer.run([batch, batch])
     assert isinstance(trainer.state.output, dict)
```

src/templates/template-vision-dcgan/trainers.py (+1 −3)

```diff
@@ -23,9 +23,7 @@ def setup_trainer(
 
     real_labels = torch.ones(config.train_batch_size // ws, device=device)
     fake_labels = torch.zeros(config.train_batch_size // ws, device=device)
-    noise = torch.randn(
-        config.train_batch_size // ws, config.z_dim, 1, 1, device=device
-    )
+    noise = torch.randn(config.train_batch_size // ws, config.z_dim, 1, 1, device=device)
 
     def train_function(engine: Union[Engine, DeterministicEngine], batch: Any):
         model_g.train()
```

src/templates/template-vision-dcgan/utils.py (+1)

```diff
@@ -46,6 +46,7 @@ def setup_handlers(
     #::: } :::#
 
     #::: if (it.patience) { :::#
+
     # early stopping
     def score_fn(engine: Engine):
         return -engine.state.metrics["errD"]
```

src/templates/template-vision-segmentation/data.py (+2 −7)

```diff
@@ -88,9 +88,7 @@ def setup_data(config: Namespace):
             "Dataset not found. You can use `download_datasets` from data.py function to download it."
         ) from e
 
-    dataset_eval = VOCSegmentationPIL(
-        root=config.data_path, year="2012", image_set="val", download=False
-    )
+    dataset_eval = VOCSegmentationPIL(root=config.data_path, year="2012", image_set="val", download=False)
 
     val_img_size = 513
     train_img_size = 480
@@ -100,9 +98,7 @@ def setup_data(config: Namespace):
 
     transform_train = A.Compose(
         [
-            A.RandomScale(
-                scale_limit=(0.0, 1.5), interpolation=cv2.INTER_LINEAR, p=1.0
-            ),
+            A.RandomScale(scale_limit=(0.0, 1.5), interpolation=cv2.INTER_LINEAR, p=1.0),
             A.PadIfNeeded(val_img_size, val_img_size, border_mode=cv2.BORDER_CONSTANT),
             A.RandomCrop(train_img_size, train_img_size),
             A.HorizontalFlip(),
@@ -176,7 +172,6 @@ def download_datasets(data_path):
     # Ensure that only rank 0 download the dataset
     idist.barrier()
     #::: } :::#
-
     VOCSegmentation(data_path, image_set="train", download=True)
     VOCSegmentation(data_path, image_set="val", download=True)
     #::: if (it.use_dist) { :::#
```

src/templates/template-vision-segmentation/main.py (+2 −6)

```diff
@@ -66,9 +66,7 @@ def run(local_rank: int, config: Any):
     metrics = {"IoU": IoU(cm_metric), "mIoU_bg": mIoU(cm_metric)}
 
     # trainer and evaluator
-    trainer = setup_trainer(
-        config, model, optimizer, loss_fn, device, dataloader_train.sampler
-    )
+    trainer = setup_trainer(config, model, optimizer, loss_fn, device, dataloader_train.sampler)
     evaluator = setup_evaluator(config, model, metrics, device)
 
     # setup engines logger with python logging
@@ -105,9 +103,7 @@ def run(local_rank: int, config: Any):
     #::: } else { :::#
     to_save_eval = None
     #::: } :::#
-    ckpt_handler_train, ckpt_handler_eval = setup_handlers(
-        trainer, evaluator, config, to_save_train, to_save_eval
-    )
+    ckpt_handler_train, ckpt_handler_eval = setup_handlers(trainer, evaluator, config, to_save_train, to_save_eval)
     #::: } else if (it.patience || it.terminate_on_nan || it.limit_sec) { :::#
     setup_handlers(trainer, evaluator, config)
     #::: } :::#
```
