diff --git a/pytorch_lightning/trainer/training_loop.py b/pytorch_lightning/trainer/training_loop.py
index be0735701850a..1f6a36eb56c89 100644
--- a/pytorch_lightning/trainer/training_loop.py
+++ b/pytorch_lightning/trainer/training_loop.py
@@ -743,7 +743,8 @@ def call_optimizer_step(self, optimizer, opt_idx, batch_idx, split_batch):
         # when using 16-bit
         else:
             native_amp = self.use_amp and NATIVE_AMP_AVALAIBLE
-            model.optimizer_step(self.current_epoch, batch_idx, optimizer, opt_idx, lambda_closure, native_amp)
+            model.optimizer_step(self.current_epoch, batch_idx, optimizer, opt_idx, lambda_closure,
+                                 using_native_amp=native_amp)
 
         # in native 16-bit we need to update scaler after optimizer step
         if self.use_amp and NATIVE_AMP_AVALAIBLE:
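
For context, the change means the trainer now passes the AMP flag to the model's optimizer_step hook as an explicit keyword argument. A minimal sketch of a user-side override that matches the call shown in the hunk above (the parameter names other than using_native_amp are illustrative, not taken from the diff):

    import pytorch_lightning as pl

    class MyModel(pl.LightningModule):
        # Hypothetical override accepting the same arguments the trainer passes:
        # positional (epoch, batch_idx, optimizer, optimizer_idx, closure) plus
        # the using_native_amp keyword introduced by this change.
        def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx,
                           closure=None, using_native_amp=False):
            # default behaviour: apply the update and reset gradients
            optimizer.step()
            optimizer.zero_grad()

Passing the flag by keyword keeps existing overrides with a trailing positional parameter working while making the new argument self-describing at the call site.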