From c501f7bb6290ee05276b8334fa87d387604f6aeb Mon Sep 17 00:00:00 2001
From: Jeremy Jordan
Date: Sun, 26 Jan 2020 15:10:43 -0500
Subject: [PATCH 1/2] add more detail to tbptt example

---
 pytorch_lightning/core/lightning.py | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/pytorch_lightning/core/lightning.py b/pytorch_lightning/core/lightning.py
index 23757110c4aa8..a97298f79b6f7 100644
--- a/pytorch_lightning/core/lightning.py
+++ b/pytorch_lightning/core/lightning.py
@@ -169,6 +169,14 @@ def training_step(self, batch, batch_idx, optimizer_idx):
             # Truncated back-propagation through time
             def training_step(self, batch, batch_idx, hiddens):
                 # hiddens are the hiddens from the previous truncated backprop step
+                ...
+                out, hiddens = self.lstm(data, hiddens)
+                ...
+
+                return {
+                    "loss": ...,
+                    "hiddens": hiddens  # remember to detach() this
+                }
 
         You can also return a -1 instead of a dict to stop the current loop. This is useful
         if you want to break out of the current training epoch early.

From 897dfe1ef595bc0106a51d25f38eda80d1008e78 Mon Sep 17 00:00:00 2001
From: Jeremy Jordan
Date: Sun, 26 Jan 2020 15:24:56 -0500
Subject: [PATCH 2/2] warn user about new arg in training_step

---
 pytorch_lightning/trainer/trainer.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py
index c2add79416021..ed767b9ebee05 100644
--- a/pytorch_lightning/trainer/trainer.py
+++ b/pytorch_lightning/trainer/trainer.py
@@ -413,6 +413,10 @@ def __init__(
                 # backprop every 5 steps in a batch
                 trainer = Trainer(truncated_bptt_steps=5)
 
+            Using this feature requires updating your LightningModule's `training_step()` to include
+            a `hiddens` arg.
+
+
             resume_from_checkpoint (str): To resume training from a specific checkpoint pass in
                 the path here.
 
                 Example::
 
                     # default used by the Trainer
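
For readers following along, below is a rough, self-contained sketch of how the two pieces documented above fit together: a training_step() that accepts the new hiddens argument and a Trainer configured with truncated_bptt_steps. This example is not part of either patch; the module, layer sizes, and random data are invented for illustration, and it assumes the trainer passes hiddens=None on the first split of each batch and splits batches along the time dimension (dim 1).

    import torch
    from torch import nn
    from torch.utils.data import DataLoader, TensorDataset
    import pytorch_lightning as pl


    class TBPTTExample(pl.LightningModule):
        """Hypothetical module illustrating truncated backprop through time."""

        def __init__(self):
            super().__init__()
            self.lstm = nn.LSTM(input_size=10, hidden_size=20, batch_first=True)
            self.head = nn.Linear(20, 1)

        def training_step(self, batch, batch_idx, hiddens):
            # hiddens is assumed to be None on the first split of a batch,
            # then the value returned from the previous truncated backprop step
            x, y = batch
            out, hiddens = self.lstm(x, hiddens)
            loss = nn.functional.mse_loss(self.head(out), y)
            return {
                "loss": loss,
                # detach so the graph from the previous split is not kept alive
                "hiddens": tuple(h.detach() for h in hiddens),
            }

        def train_dataloader(self):
            # 8 random sequences of length 50 with 10 features each (made-up data)
            x = torch.randn(8, 50, 10)
            y = torch.randn(8, 50, 1)
            return DataLoader(TensorDataset(x, y), batch_size=4)

        def configure_optimizers(self):
            return torch.optim.Adam(self.parameters(), lr=1e-3)


    if __name__ == "__main__":
        # backprop every 5 time steps within each batch, as in the docstring above
        trainer = pl.Trainer(truncated_bptt_steps=5, max_epochs=1)
        trainer.fit(TBPTTExample())

Detaching the returned hidden state is the important part: without it, autograd would try to backpropagate through every earlier split of the sequence, which defeats the purpose of truncating backprop in the first place.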