drop deprecated result object 1/n #5005

Merged (3 commits) on Feb 17, 2021
32 changes (1 addition, 31 deletions) in pytorch_lightning/trainer/training_loop.py
@@ -26,7 +26,7 @@
 from pytorch_lightning.trainer.states import RunningStage, TrainerState
 from pytorch_lightning.trainer.supporters import Accumulator, TensorRunningAccum
 from pytorch_lightning.utilities import _TPU_AVAILABLE, AMPType, DeviceType, parsing
-from pytorch_lightning.utilities.distributed import rank_zero_info, rank_zero_warn
+from pytorch_lightning.utilities.distributed import rank_zero_info
 from pytorch_lightning.utilities.exceptions import MisconfigurationException
 from pytorch_lightning.utilities.memory import recursive_detach
 from pytorch_lightning.utilities.model_helpers import is_overridden
@@ -341,13 +341,6 @@ def _process_training_step_output(self, training_step_output, split_batch):
         if training_step_output_for_epoch_end is None:
             return None, None

-        # -----------------------------------------
-        # process result return (DEPRECATE in 1.0)
-        # -----------------------------------------
-        if isinstance(training_step_output, Result):
-            training_step_output_for_epoch_end = self._process_result(training_step_output, split_batch)
-            return training_step_output_for_epoch_end, training_step_output
-
         # -----------------------------------------
         # process hybrid (1.0)
         # -----------------------------------------
@@ -413,29 +406,6 @@ def _process_training_step_output_1_0(self, training_step_output, split_batch):

         return training_step_output_for_epoch_end, training_step_output

-    def _process_result(self, training_step_output, split_batch):
-        training_step_output.track_batch_size(len(split_batch))
-        m = """
-            TrainResult and EvalResult were deprecated in 0.9.1 and support will drop in 1.0.0.
-            Use self.log and .write from the LightningModule to log metrics and write predictions.
-            training_step can now only return a scalar (for the loss) or a dictionary with anything you want.
-
-            Option 1:
-            return loss
-
-            Option 2:
-            return {'loss': loss, 'anything_else': ...}
-
-            Option 3:
-            return {'loss': loss, 'hiddens': hiddens, 'anything_else': ...}
-        """
-        rank_zero_warn(m)
-
-        training_step_output_for_epoch_end = copy(training_step_output)
-        training_step_output_for_epoch_end.detach()
-
-        return training_step_output_for_epoch_end
-
     def optimizer_step(self, optimizer, opt_idx, batch_idx, train_step_and_backward_closure):
         model_ref = self.trainer.get_model()

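For reference, a minimal sketch of the return formats that replace the deleted Result path, following the three options listed in the removed warning message above. This is not part of the diff: the module name, layer sizes, and metric key are illustrative only, and the example assumes standard LightningModule hooks (self.log, training_step, configure_optimizers).

    import torch
    import pytorch_lightning as pl


    class LitModel(pl.LightningModule):
        """Illustrative module; the architecture here is made up."""

        def __init__(self):
            super().__init__()
            self.layer = torch.nn.Linear(32, 2)

        def training_step(self, batch, batch_idx):
            x, y = batch
            loss = torch.nn.functional.cross_entropy(self.layer(x), y)

            # Log metrics with self.log instead of TrainResult/EvalResult.
            self.log("train_loss", loss)

            # Option 1: return the loss tensor directly.
            # return loss

            # Option 2/3: return a dict containing 'loss' plus anything else,
            # e.g. 'hiddens' for truncated backpropagation through time.
            return {"loss": loss}

        def configure_optimizers(self):
            return torch.optim.Adam(self.parameters(), lr=1e-3)

With the Result branch gone, _process_training_step_output only has to handle these scalar/dict returns, which is what lets this PR drop _process_result and the rank_zero_warn import in one step.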