Commit 920ac61

Author: Jeff Yang

fix(templates): merge handlers.py into utils.py (#55)

* fix(templates): merge handlers.py into utils.py
* fix: includes -> include
1 parent c791f60 commit 920ac61
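For context: these template files are rendered with Jinja2, whose {% include %} tag splices the named file's contents into the rendered output. Merging the handler helpers into utils.py therefore means the generated utils.py also defines get_handlers and get_logger, which is why the tests below now import them from utils. A minimal rendering sketch, assuming Jinja2's FileSystemLoader; the paths and script name are illustrative, not taken from this repo's actual generator:

# render_sketch.py -- illustrative only, not this repo's build script
from jinja2 import Environment, FileSystemLoader

# Load templates from one template directory (hypothetical path).
env = Environment(loader=FileSystemLoader("templates/gan"))

# Rendering utils.py resolves {% include "_handlers.py" %} by inlining
# the contents of _handlers.py at that point in the output, so the
# generated utils.py contains the handler helpers too.
print(env.get_template("utils.py").render())

The second bullet in the message matters for the same reason: Jinja2 defines an include tag but no includes tag, so {% includes ... %} would fail at parse time with a TemplateSyntaxError.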

File tree

9 files changed (+8 -9 lines)


templates/gan/handlers.py (-1)

This file was deleted.

templates/gan/test_all.py (+1 -2)

@@ -8,7 +8,6 @@
 import torch
 from config import get_default_parser
 from datasets import get_datasets
-from handlers import get_handlers, get_logger
 from ignite.contrib.handlers import (
     ClearMLLogger,
     MLflowLogger,
@@ -28,7 +27,7 @@
 from torch import nn, optim
 from torch.utils.data import Dataset
 from trainers import create_trainers, train_function
-from utils import hash_checkpoint, log_metrics, resume_from, setup_logging
+from utils import hash_checkpoint, log_metrics, resume_from, setup_logging, get_handlers, get_logger
 
 
 class TestDataset(unittest.TestCase):

templates/gan/utils.py (+2)

@@ -21,6 +21,8 @@
 from torch.optim.optimizer import Optimizer
 
 from models import Generator, Discriminator
+{% include "_handlers.py" %}
+
 
 # we can use `idist.auto_model` to handle distributed configurations
 # for your model : https://pytorch.org/ignite/distributed.html#ignite.distributed.auto.auto_model

templates/image_classification/handlers.py (-1)

This file was deleted.

templates/image_classification/test_all.py (+1 -2)

@@ -9,7 +9,6 @@
 import torch
 from config import get_default_parser
 from datasets import get_datasets
-from handlers import get_handlers, get_logger
 from ignite.contrib.handlers import (
     ClearMLLogger,
     MLflowLogger,
@@ -36,7 +35,7 @@
     train_events_to_attr,
     train_function,
 )
-from utils import hash_checkpoint, initialize, log_metrics, resume_from, setup_logging
+from utils import hash_checkpoint, initialize, log_metrics, resume_from, setup_logging, get_handlers, get_logger
 
 
 class TestDataset(unittest.TestCase):

templates/image_classification/utils.py (+2)

@@ -21,6 +21,8 @@
 from torch.optim import Optimizer, SGD
 
 from models import get_model
+{% include "_handlers.py" %}
+
 
 
 def initialize(config: Optional[Any]) -> Tuple[Module, Optimizer, Module, Union[_LRScheduler, ParamScheduler]]:

templates/single/handlers.py (-1)

This file was deleted.

templates/single/test_all.py (+1 -2)

@@ -26,15 +26,14 @@
 from torch import nn, optim
 
 from config import get_default_parser
-from handlers import get_handlers, get_logger
 from trainers import (
     TrainEvents,
     create_trainers,
     evaluate_function,
     train_events_to_attr,
     train_function,
 )
-from utils import hash_checkpoint, log_metrics, resume_from, setup_logging
+from utils import hash_checkpoint, log_metrics, resume_from, setup_logging, get_handlers, get_logger
 
 
 class TestHandlers(unittest.TestCase):

templates/single/utils.py (+1)

@@ -18,6 +18,7 @@
 from torch.nn import Module
 from torch.optim.lr_scheduler import _LRScheduler
 from torch.optim.optimizer import Optimizer
+{% include "_handlers.py" %}
 
 
 # we can use `idist.auto_model` to handle distributed configurations
