@@ -49,19 +49,20 @@ class WandbLogger(LightningLoggerBase):
 
     Args:
         name: Display name for the run.
-        save_dir: Path where data is saved.
+        save_dir: Path where data is saved (wandb dir by default).
         offline: Run offline (data can be streamed later to wandb servers).
         id: Sets the version, mainly used to resume a previous run.
+        version: Same as id.
         anonymous: Enables or explicitly disables anonymous logging.
-        version: Sets the version, mainly used to resume a previous run.
         project: The name of the project to which this run will belong.
         log_model: Save checkpoints in wandb dir to upload on W&B servers.
-        experiment: WandB experiment object.
         prefix: A string to put at the beginning of metric keys.
+        sync_step: Sync Trainer step with wandb step.
+        experiment: WandB experiment object. Automatically set when creating a run.
         \**kwargs: Additional arguments like `entity`, `group`, `tags`, etc. used by
             :func:`wandb.init` can be passed as keyword arguments in this logger.
 
-    Example::
+    Example:
 
     .. code-block:: python
 
@@ -74,9 +75,9 @@ class WandbLogger(LightningLoggerBase):
     make sure to use `commit=False` so the logging step does not increase.
 
     See Also:
-        - `Tutorial <https://app.wandb.ai/cayush/pytorchlightning/reports/
-          Use-Pytorch-Lightning-with-Weights-%26-Biases--Vmlldzo2NjQ1Mw>`__
-          on how to use W&B with Pytorch Lightning.
+        - `Tutorial <https://colab.research.google.com/drive/16d1uctGaw2y9KhGBlINNTsWpmlXdJwRW?usp=sharing>`__
+          on how to use W&B with PyTorch Lightning
+        - `W&B Documentation <https://docs.wandb.ai/integrations/lightning>`__
 
     """
 
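For context, a minimal usage sketch of the API documented above, after this change (the `project` value is a hypothetical placeholder):

```python
from pytorch_lightning import Trainer
from pytorch_lightning.loggers import WandbLogger

# sync_step=True (the new default) keeps wandb's step counter aligned with
# the Trainer step; pass sync_step=False to let wandb manage its own step.
wandb_logger = WandbLogger(project="my-project", sync_step=True)
trainer = Trainer(logger=wandb_logger)
```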
@@ -86,14 +87,15 @@ def __init__(
         self,
         name: Optional[str] = None,
         save_dir: Optional[str] = None,
-        offline: bool = False,
+        offline: Optional[bool] = False,
         id: Optional[str] = None,
-        anonymous: bool = False,
+        anonymous: Optional[bool] = False,
         version: Optional[str] = None,
         project: Optional[str] = None,
-        log_model: bool = False,
+        log_model: Optional[bool] = False,
         experiment=None,
-        prefix: str = '',
+        prefix: Optional[str] = '',
+        sync_step: Optional[bool] = True,
         **kwargs
     ):
         if wandb is None:
@@ -102,13 +104,14 @@ def __init__(
         super().__init__()
         self._name = name
         self._save_dir = save_dir
-        self._anonymous = 'allow' if anonymous else None
+        self._offline = offline
         self._id = version or id
+        self._anonymous = 'allow' if anonymous else None
         self._project = project
-        self._experiment = experiment
-        self._offline = offline
         self._log_model = log_model
         self._prefix = prefix
+        self._sync_step = sync_step
+        self._experiment = experiment
         self._kwargs = kwargs
         # logging multiple Trainer on a single W&B run (k-fold, resuming, etc)
         self._step_offset = 0
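One consequence worth noting from the reordered assignments: `self._id = version or id` means `version` wins whenever both arguments are supplied. A quick illustration (run names are hypothetical):

```python
# Resumes under "run-b", not "run-a", because of `self._id = version or id`.
logger = WandbLogger(id="run-a", version="run-b")
```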
@@ -164,11 +167,16 @@ def log_metrics(self, metrics: Dict[str, float], step: Optional[int] = None) ->
         assert rank_zero_only.rank == 0, 'experiment tried to log from global_rank != 0'
 
         metrics = self._add_prefix(metrics)
-        if step is not None and step + self._step_offset < self.experiment.step:
+        if self._sync_step and step is not None and step + self._step_offset < self.experiment.step:
             self.warning_cache.warn(
-                'Trying to log at a previous step. Use `commit=False` when logging metrics manually.'
-            )
-        self.experiment.log(metrics, step=(step + self._step_offset) if step is not None else None)
+                'Trying to log at a previous step. Use `WandbLogger(sync_step=False)`'
+                ' or try logging with `commit=False` when calling `wandb.log` manually.')
+        if self._sync_step:
+            self.experiment.log(metrics, step=(step + self._step_offset) if step is not None else None)
+        elif step is not None:
+            self.experiment.log({**metrics, 'trainer_step': (step + self._step_offset)})
+        else:
+            self.experiment.log(metrics)
 
     @property
     def save_dir(self) -> Optional[str]:
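The new branching in `log_metrics` can be read as three cases. A standalone sketch of the control flow (not the actual class; `experiment` stands in for a wandb run object and `step_offset` for `self._step_offset`):

```python
def log_with_optional_sync(experiment, metrics, step, step_offset, sync_step):
    if sync_step:
        # wandb's step is forced to follow the Trainer step (plus the offset
        # kept for multiple Trainers logging to a single run).
        experiment.log(metrics, step=(step + step_offset) if step is not None else None)
    elif step is not None:
        # wandb keeps its own step; the Trainer step is logged as a metric.
        experiment.log({**metrics, 'trainer_step': step + step_offset})
    else:
        experiment.log(metrics)
```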