1 parent aa798b7 commit 77acabd
src/transformers/trainer.py
@@ -3077,7 +3077,11 @@ def _maybe_log_save_evaluate(
             # reset tr_loss to zero
             tr_loss -= tr_loss
 
-            logs["loss"] = round(tr_loss_scalar / (self.state.global_step - self._globalstep_last_logged), 4)
+            log_eval = tr_loss_scalar / (self.state.global_step - self._globalstep_last_logged)
+            if log_eval > 1e-4:
+                logs["loss"] = str(round(log_eval, 4))
+            else:
+                logs["loss"] = "{:e}".format(log_eval)
             if grad_norm is not None:
                 logs["grad_norm"] = grad_norm.item() if isinstance(grad_norm, torch.Tensor) else grad_norm
             if learning_rate is not None:
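The change replaces the unconditional four-decimal rounding with a two-branch format: average losses above 1e-4 keep the rounded decimal display, while smaller values are rendered in scientific notation so they no longer show up as "0.0". A minimal standalone sketch of that logic, assuming the threshold and formatting from the diff above (the helper name format_loss is hypothetical, not part of the Trainer API):

def format_loss(log_eval: float) -> str:
    # Losses above the 1e-4 threshold: round to four decimal places,
    # matching the behavior of the line the commit removed.
    if log_eval > 1e-4:
        return str(round(log_eval, 4))
    # Very small losses: scientific notation instead of a misleading "0.0".
    return "{:e}".format(log_eval)

if __name__ == "__main__":
    print(format_loss(0.523419))  # 0.5234
    print(format_loss(3.2e-6))    # 3.200000e-06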