Skip to content

Commit

Permalink
Removing unnecessary early stopping calls
Browse files Browse the repository at this point in the history
  • Loading branch information
Mateusz Pieniak authored and Borda committed May 22, 2020
1 parent 8f6b7a2 commit b1c713d
Showing 1 changed file with 0 additions and 6 deletions.
6 changes: 0 additions & 6 deletions pytorch_lightning/trainer/training_loop.py
Original file line number Diff line number Diff line change
Expand Up @@ -452,7 +452,6 @@ def run_training_epoch(self):
if self.fast_dev_run or should_check_val:
self.run_evaluation(test_mode=self.testing)
self.call_checkpoint_callback()
self.call_early_stop_callback()

# when logs should be saved
should_save_log = (batch_idx + 1) % self.log_save_interval == 0 or early_stop_epoch
Expand Down Expand Up @@ -498,7 +497,6 @@ def run_training_epoch(self):
# when no val loop is present or fast-dev-run still need to call checkpoints
if not self.is_overridden('validation_step') and not (self.fast_dev_run or should_check_val):
self.call_checkpoint_callback()
self.call_early_stop_callback()

# Epoch end events
with self.profiler.profile('on_epoch_end'):
Expand Down Expand Up @@ -791,10 +789,6 @@ def call_checkpoint_callback(self):
if self.checkpoint_callback is not None:
self.checkpoint_callback.on_validation_end(self, self.get_model())

def call_early_stop_callback(self):
if self.early_stop_callback:
self.early_stop_callback.on_epoch_end(self, self.get_model())


def _with_is_last(iterable):
"""Pass through values from the given iterable with an added boolean indicating if this is the last item.
Expand Down

0 comments on commit b1c713d

Please sign in to comment.