@@ -16,11 +16,7 @@
 
 #::: } :::#
 #::: if (it.save_training || it.save_evaluation) { :::#
-from ignite.handlers import (
-    Checkpoint,
-    DiskSaver,
-    global_step_from_engine,
-)  # usort: skip
+from ignite.handlers import Checkpoint, DiskSaver, global_step_from_engine  # usort: skip
 
 #::: } else { :::#
 from ignite.handlers import Checkpoint
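As a side note on the handlers imported here: a minimal sketch of how `Checkpoint`, `DiskSaver`, and `global_step_from_engine` typically fit together (the `model`, `trainer`, and `evaluator` below are hypothetical stand-ins, not objects from this template):

import torch.nn as nn
from ignite.engine import Engine, Events
from ignite.handlers import Checkpoint, DiskSaver, global_step_from_engine

# Hypothetical objects for illustration only.
model = nn.Linear(2, 2)
trainer = Engine(lambda engine, batch: None)
evaluator = Engine(lambda engine, batch: None)

# Keep the two most recent checkpoints on disk; file names use the
# trainer's global step instead of the evaluator's iteration count.
handler = Checkpoint(
    {"model": model},
    DiskSaver("./checkpoints", require_empty=False),
    n_saved=2,
    global_step_transform=global_step_from_engine(trainer),
)
evaluator.add_event_handler(Events.COMPLETED, handler)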
@@ -84,9 +80,7 @@ def log_metrics(engine: Engine, tag: str) -> None:
     tag
         a string to add at the start of output.
     """
-    metrics_format = "{0} [{1}/{2}]: {3}".format(
-        tag, engine.state.epoch, engine.state.iteration, engine.state.metrics
-    )
+    metrics_format = "{0} [{1}/{2}]: {3}".format(tag, engine.state.epoch, engine.state.iteration, engine.state.metrics)
     engine.logger.info(metrics_format)
 
 
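`log_metrics` is only reformatted by this change; for context, it is typically attached as an event handler, roughly like this (the `trainer` engine and the `"train"` tag are illustrative; `log_metrics` is the function from this file):

from ignite.engine import Events

# Extra args after the handler are forwarded to it after the engine,
# so this calls log_metrics(trainer, tag="train") once per epoch.
trainer.add_event_handler(Events.EPOCH_COMPLETED, log_metrics, tag="train")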
@@ -175,21 +169,13 @@ def setup_exp_logging(config, trainer, optimizers, evaluators):
     """Setup Experiment Tracking logger from Ignite."""
 
     #::: if (it.logger === 'clearml') { :::#
-    logger = common.setup_clearml_logging(
-        trainer, optimizers, evaluators, config.log_every_iters
-    )
+    logger = common.setup_clearml_logging(trainer, optimizers, evaluators, config.log_every_iters)
     #::: } else if (it.logger === 'mlflow') { :::#
-    logger = common.setup_mlflow_logging(
-        trainer, optimizers, evaluators, config.log_every_iters
-    )
+    logger = common.setup_mlflow_logging(trainer, optimizers, evaluators, config.log_every_iters)
     #::: } else if (it.logger === 'neptune') { :::#
-    logger = common.setup_neptune_logging(
-        trainer, optimizers, evaluators, config.log_every_iters
-    )
+    logger = common.setup_neptune_logging(trainer, optimizers, evaluators, config.log_every_iters)
     #::: } else if (it.logger === 'polyaxon') { :::#
-    logger = common.setup_plx_logging(
-        trainer, optimizers, evaluators, config.log_every_iters
-    )
+    logger = common.setup_plx_logging(trainer, optimizers, evaluators, config.log_every_iters)
     #::: } else if (it.logger === 'tensorboard') { :::#
     logger = common.setup_tb_logging(
         config.output_dir,
@@ -199,13 +185,9 @@ def setup_exp_logging(config, trainer, optimizers, evaluators):
         config.log_every_iters,
     )
     #::: } else if (it.logger === 'visdom') { :::#
-    logger = common.setup_visdom_logging(
-        trainer, optimizers, evaluators, config.log_every_iters
-    )
+    logger = common.setup_visdom_logging(trainer, optimizers, evaluators, config.log_every_iters)
     #::: } else if (it.logger === 'wandb') { :::#
-    logger = common.setup_wandb_logging(
-        trainer, optimizers, evaluators, config.log_every_iters
-    )
+    logger = common.setup_wandb_logging(trainer, optimizers, evaluators, config.log_every_iters)
     #::: } :::#
 
     return logger
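All of the `setup_*_logging` branches above share one calling convention; assuming `common` refers to `ignite.contrib.engines.common` (an assumption — the import sits outside this diff), a call site would look roughly like:

# Hypothetical call site; assumes `config` exposes `output_dir` and
# `log_every_iters`, and that trainer/evaluator engines already exist.
logger = setup_exp_logging(config, trainer, None, {"eval": evaluator})

# ... training runs here ...

logger.close()  # flush and close the tracking backend when done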