Skip to content

Commit

Permalink
Merge 9a2b089 into c9622ba
Browse files Browse the repository at this point in the history
  • Loading branch information
Borda authored Feb 17, 2021
2 parents c9622ba + 9a2b089 commit 5654206
Showing 1 changed file with 1 addition and 31 deletions.
32 changes: 1 addition & 31 deletions pytorch_lightning/trainer/training_loop.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@
from pytorch_lightning.trainer.states import RunningStage, TrainerState
from pytorch_lightning.trainer.supporters import Accumulator, TensorRunningAccum
from pytorch_lightning.utilities import _TPU_AVAILABLE, AMPType, DeviceType, parsing
from pytorch_lightning.utilities.distributed import rank_zero_info, rank_zero_warn
from pytorch_lightning.utilities.distributed import rank_zero_info
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.memory import recursive_detach
from pytorch_lightning.utilities.model_helpers import is_overridden
Expand Down Expand Up @@ -341,13 +341,6 @@ def _process_training_step_output(self, training_step_output, split_batch):
if training_step_output_for_epoch_end is None:
return None, None

# -----------------------------------------
# process result return (DEPRECATE in 1.0)
# -----------------------------------------
if isinstance(training_step_output, Result):
training_step_output_for_epoch_end = self._process_result(training_step_output, split_batch)
return training_step_output_for_epoch_end, training_step_output

# -----------------------------------------
# process hybrid (1.0)
# -----------------------------------------
Expand Down Expand Up @@ -413,29 +406,6 @@ def _process_training_step_output_1_0(self, training_step_output, split_batch):

return training_step_output_for_epoch_end, training_step_output

def _process_result(self, training_step_output, split_batch):
    """Handle a deprecated ``Result``-style return from ``training_step``.

    Records the split-batch size on the ``Result`` object, warns (rank zero
    only) that this return style is deprecated, and hands back a detached
    copy suitable for epoch-end processing.

    Args:
        training_step_output: the ``Result`` object returned by the user's
            ``training_step``.
        split_batch: the (possibly truncated-BPTT) batch split this output
            was produced from; only its length is used.

    Returns:
        A detached shallow copy of ``training_step_output`` for epoch-end use.
    """
    # Track how many samples this output covers before copying it.
    training_step_output.track_batch_size(len(split_batch))

    deprecation_message = """
TrainResult and EvalResult were deprecated in 0.9.1 and support will drop in 1.0.0.
Use self.log and .write from the LightningModule to log metrics and write predictions.
training_step can now only return a scalar (for the loss) or a dictionary with anything you want.
Option 1:
return loss
Option 2:
return {'loss': loss, 'anything_else': ...}
Option 3:
return {'loss': loss, 'hiddens': hiddens, 'anything_else': ...}
"""
    rank_zero_warn(deprecation_message)

    # Detach a copy for epoch-end aggregation; the original object (and its
    # graph, if any) is left untouched for the caller.
    epoch_end_output = copy(training_step_output)
    epoch_end_output.detach()
    return epoch_end_output

def optimizer_step(self, optimizer, opt_idx, batch_idx, train_step_and_backward_closure):
model_ref = self.trainer.get_model()

Expand Down

0 comments on commit 5654206

Please sign in to comment.