Fix early stopping off by 2 (min_epochs) (#617)
* fix early stopping off by 2

* add min_epochs example in docs
Adrian Wälchli authored and williamFalcon committed Dec 9, 2019
1 parent d562172 commit e2ee4dd
Showing 1 changed file with 4 additions and 1 deletion.
pytorch_lightning/trainer/training_loop.py
@@ -50,6 +50,9 @@
     # pass in your own to override the default callback
     trainer = Trainer(early_stop_callback=early_stop_callback)
 
+    # pass in min_epochs to enable the callback after min_epochs have run
+    trainer = Trainer(early_stop_callback=early_stop_callback, min_epochs=5)
+
     # pass in None to disable it
     trainer = Trainer(early_stop_callback=None)
@@ -339,7 +342,7 @@ def train(self):
                 self.reduce_lr_on_plateau_scheduler.step(val_loss, epoch=self.current_epoch)
 
             # early stopping
-            met_min_epochs = epoch > self.min_epochs
+            met_min_epochs = epoch >= self.min_epochs - 1
             if self.enable_early_stop and (met_min_epochs or self.fast_dev_run):
                 should_stop = self.early_stop_callback.on_epoch_end(epoch=epoch,
                                                                     logs=self.callback_metrics)
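
Why the old check was off by two: `epoch` in the loop above is a zero-based index, so `epoch > self.min_epochs` first becomes true on the (min_epochs + 2)-th epoch, while the new `epoch >= self.min_epochs - 1` first holds on the min_epochs-th epoch. A minimal standalone sketch of the two conditions (the values and loop here are illustrative, not taken from the Trainer):

    # Illustrative comparison of the old and new conditions; not Trainer internals.
    # Assumes a zero-based epoch counter, as used in the training loop above.
    min_epochs = 5

    for epoch in range(8):
        old_check = epoch > min_epochs        # first True at epoch index 6, i.e. after 7 epochs
        new_check = epoch >= min_epochs - 1   # first True at epoch index 4, i.e. after 5 epochs
        print(f"epochs run: {epoch + 1}  old: {old_check}  new: {new_check}")

With min_epochs=5, the old condition only allowed early stopping after 7 completed epochs; the new one allows it after 5, as documented.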
