diff --git a/pytorch_lightning/trainer/training_loop.py b/pytorch_lightning/trainer/training_loop.py
index d8bdb3f49c585..aa52b91f407e8 100644
--- a/pytorch_lightning/trainer/training_loop.py
+++ b/pytorch_lightning/trainer/training_loop.py
@@ -362,7 +362,7 @@ def train(self):
             # update LR schedulers
             if self.lr_schedulers is not None:
                 for lr_scheduler in self.lr_schedulers:
-                    lr_scheduler.step(epoch=self.current_epoch)
+                    lr_scheduler.step()
             if self.reduce_lr_on_plateau_scheduler is not None:
                 val_loss = self.callback_metrics.get('val_loss')
                 if val_loss is None:
@@ -370,7 +370,7 @@ def train(self):
                     m = f'ReduceLROnPlateau conditioned on metric val_loss ' \
                         f'which is not available. Available metrics are: {avail_metrics}'
                     raise MisconfigurationException(m)
-                self.reduce_lr_on_plateau_scheduler.step(val_loss, epoch=self.current_epoch)
+                self.reduce_lr_on_plateau_scheduler.step(val_loss)
 
             # early stopping
             met_min_epochs = epoch >= self.min_epochs - 1
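
For context: since PyTorch 1.1, `_LRScheduler.step()` tracks its own epoch counter, and passing an explicit `epoch=` argument is deprecated (it triggers a `UserWarning`), which is what this diff cleans up. `ReduceLROnPlateau.step()` keeps its metric argument, since it reacts to a monitored value rather than the epoch count. Below is a minimal sketch of the per-epoch pattern the patched loop follows; the model, optimizer, scheduler settings, and fake validation loss are illustrative stand-ins, not code from this PR:

```python
import torch
from torch import nn
from torch.optim.lr_scheduler import StepLR, ReduceLROnPlateau

# Illustrative model/optimizer; any module and optimizer would do here.
model = nn.Linear(10, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

step_lr = StepLR(optimizer, step_size=10)   # epoch-driven scheduler
plateau = ReduceLROnPlateau(optimizer)      # metric-driven scheduler

for epoch in range(5):
    val_loss = torch.rand(1).item()  # stand-in for a real validation loss

    # No epoch= argument: the scheduler advances its own internal counter.
    step_lr.step()

    # ReduceLROnPlateau still takes the monitored metric, but not epoch=.
    plateau.step(val_loss)
```

This mirrors the structure of the patched `train()` loop, where the epoch-driven schedulers in `self.lr_schedulers` step unconditionally and `self.reduce_lr_on_plateau_scheduler` steps on `val_loss`.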