From 27bba1a03ad5c0f868e09844af3c2a4fc7e8521b Mon Sep 17 00:00:00 2001
From: Peter Izsak
Date: Sun, 16 Feb 2020 06:48:19 +0200
Subject: [PATCH] Fix global_step when gradient accumulation > 1 (#832)

---
 pytorch_lightning/trainer/training_loop.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/pytorch_lightning/trainer/training_loop.py b/pytorch_lightning/trainer/training_loop.py
index 3e9a906016b39..69436500941b5 100644
--- a/pytorch_lightning/trainer/training_loop.py
+++ b/pytorch_lightning/trainer/training_loop.py
@@ -426,7 +426,9 @@ def run_training_epoch(self):
                 # logs user requested information to logger
                 self.log_metrics(batch_step_metrics, grad_norm_dic)
 
-            self.global_step += 1
+            # progress global step according to grads progress
+            if (self.batch_idx + 1) % self.accumulate_grad_batches == 0:
+                self.global_step += 1
             self.total_batch_idx += 1
 
             # end epoch early
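
The following is a minimal, self-contained Python sketch (not the library's actual training loop) illustrating the counting behavior the patch introduces: with gradient accumulation, the optimizer only steps once every accumulate_grad_batches batches, so global_step should advance at that cadence rather than once per batch. The names accumulate_grad_batches, batch_idx, global_step, and total_batch_idx mirror the patch; simulate_epoch and the example values are hypothetical.

    # Minimal sketch of the step counting introduced by the patch.
    # Does not use pytorch_lightning; it only simulates the counters.
    def simulate_epoch(num_batches, accumulate_grad_batches):
        global_step = 0
        total_batch_idx = 0
        for batch_idx in range(num_batches):
            # ... forward/backward would accumulate gradients here ...
            # the optimizer (and therefore global_step) only advances
            # once every `accumulate_grad_batches` batches
            if (batch_idx + 1) % accumulate_grad_batches == 0:
                global_step += 1
            total_batch_idx += 1
        return global_step, total_batch_idx

    # With 8 batches and accumulation of 2, there are 4 optimizer steps:
    assert simulate_epoch(8, 2) == (4, 8)
    # Before the fix, global_step would have reached 8 (one per batch).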