calling self.forward() -> self() (#1211)
* self.forward() -> self()

* update changelog

Co-authored-by: Jirka Borovec <Borda@users.noreply.github.com>
jeremyjordan and Borda committed Mar 27, 2020
1 parent 2a4cd47 commit d394b80
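The change itself is mechanical, but the motivation is worth spelling out: calling the module instance (`self(x)`) goes through `torch.nn.Module.__call__`, which dispatches any registered forward pre-hooks and forward hooks around `forward()`, whereas calling `self.forward(x)` directly bypasses them. A minimal sketch of the difference, assuming a hypothetical `TinyNet` module that is not part of this commit:

```python
import torch
from torch import nn


class TinyNet(nn.Module):
    """Hypothetical module, used only to illustrate __call__ vs. forward()."""

    def __init__(self):
        super().__init__()
        self.layer = nn.Linear(4, 2)

    def forward(self, x):
        return self.layer(x)


net = TinyNet()
# Forward hooks are dispatched by nn.Module.__call__, not by forward() itself.
net.register_forward_hook(lambda module, inp, out: print("forward hook fired"))

x = torch.randn(1, 4)
net(x)          # __call__ runs pre-hooks, forward(), then hooks: the print fires
net.forward(x)  # bypasses __call__, so the hook above is silently skipped
```

Using `self(x)` inside `training_step`, `validation_step`, and `test_step` keeps that hook machinery intact, which is why calling the instance is the generally recommended pattern in PyTorch.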
Showing 15 changed files with 46 additions and 45 deletions.
1 change: 1 addition & 0 deletions CHANGELOG.md
@@ -14,6 +14,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
- Added support for `IterableDataset` in validation and testing ([#1104](https://github.com/PyTorchLightning/pytorch-lightning/pull/1104))
- Added support for non-primitive types in `hparams` for `TensorboardLogger` ([#1130](https://github.com/PyTorchLightning/pytorch-lightning/pull/1130))
- Added a check that stops the training when loss or weights contain `NaN` or `inf` values. ([#1097](https://github.com/PyTorchLightning/pytorch-lightning/pull/1097))
+ - Updated references to self.forward() to instead use the `__call__` interface. ([#1211](https://github.com/PyTorchLightning/pytorch-lightning/pull/1211))

### Changed

2 changes: 1 addition & 1 deletion README.md
@@ -200,7 +200,7 @@ def validation_step(self, batch, batch_idx):
x, y = batch

# or as basic as a CNN classification
- out = self.forward(x)
+ out = self(x)
loss = my_loss(out, y)
return {'loss': loss}
```
4 changes: 2 additions & 2 deletions docs/source/child_modules.rst
@@ -24,7 +24,7 @@ that change in the `Autoencoder` model are the init, forward, training, validati
x, _ = batch
representation = self.encoder(x)
- x_hat = self.forward(representation)
+ x_hat = self(representation)
loss = MSE(x, x_hat)
return loss
@@ -38,7 +38,7 @@ that change in the `Autoencoder` model are the init, forward, training, validati
def _shared_eval(self, batch, batch_idx, prefix):
x, y = batch
representation = self.encoder(x)
- x_hat = self.forward(representation)
+ x_hat = self(representation)
loss = F.nll_loss(logits, y)
return {f'{prefix}_loss': loss}
14 changes: 7 additions & 7 deletions docs/source/introduction_guide.rst
@@ -319,7 +319,7 @@ in the LightningModule
def training_step(self, batch, batch_idx):
x, y = batch
- logits = self.forward(x)
+ logits = self(x)
loss = F.nll_loss(logits, y)
return {'loss': loss}
# return loss (also works)
@@ -371,7 +371,7 @@ For clarity, we'll recall that the full LightningModule now looks like this.
def training_step(self, batch, batch_idx):
x, y = batch
- logits = self.forward(x)
+ logits = self(x)
loss = F.nll_loss(logits, y)
# add logging
@@ -684,7 +684,7 @@ sample split in the `train_dataloader` method.
class LitMNIST(pl.LightningModule):
def validation_step(self, batch, batch_idx):
x, y = batch
- logits = self.forward(x)
+ logits = self(x)
loss = F.nll_loss(logits, y)
return {'val_loss': loss}
@@ -740,7 +740,7 @@ Just like the validation loop, we define exactly the same steps for testing:
class LitMNIST(pl.LightningModule):
def test_step(self, batch, batch_idx):
x, y = batch
- logits = self.forward(x)
+ logits = self(x)
loss = F.nll_loss(logits, y)
return {'val_loss': loss}
@@ -827,7 +827,7 @@ within it.
def training_step(self, batch, batch_idx):
x, y = batch
- logits = self.forward(x)
+ logits = self(x)
loss = F.nll_loss(logits, y)
return loss
@@ -855,7 +855,7 @@ In this case, we've set this LightningModel to predict logits. But we could also
def training_step(self, batch, batch_idx):
x, y = batch
- out, l1_feats, l2_feats, l3_feats = self.forward(x)
+ out, l1_feats, l2_feats, l3_feats = self(x)
logits = torch.log_softmax(out, dim=1)
ce_loss = F.nll_loss(logits, y)
loss = perceptual_loss(l1_feats, l2_feats, l3_feats) + ce_loss
@@ -880,7 +880,7 @@ Or maybe we have a model that we use to do generation
def training_step(self, batch, batch_idx):
x, y = batch
representation = self.encoder(x)
- imgs = self.forward(representation)
+ imgs = self(representation)
loss = perceptual_loss(imgs, x)
return loss
2 changes: 1 addition & 1 deletion docs/source/multi_gpu.rst
@@ -207,7 +207,7 @@ to illustrate why this is needed, let's look at dataparallel
def training_step(self, batch, batch_idx):
x, y = batch
- y_hat = self.forward(batch)
+ y_hat = self(batch)
# on dp or ddp2 if we did softmax now it would be wrong
# because batch is actually a piece of the full batch
4 changes: 2 additions & 2 deletions pl_examples/basic_examples/lightning_module_template.py
@@ -106,7 +106,7 @@ def training_step(self, batch, batch_idx):
x, y = batch
x = x.view(x.size(0), -1)

- y_hat = self.forward(x)
+ y_hat = self(x)

# calculate loss
loss_val = self.loss(y, y_hat)
@@ -133,7 +133,7 @@ def validation_step(self, batch, batch_idx):
"""
x, y = batch
x = x.view(x.size(0), -1)
- y_hat = self.forward(x)
+ y_hat = self(x)

loss_val = self.loss(y, y_hat)

4 changes: 2 additions & 2 deletions pl_examples/domain_templates/gan.py
@@ -105,7 +105,7 @@ def training_step(self, batch, batch_idx, optimizer_idx):
z = z.cuda(imgs.device.index)

# generate images
- self.generated_imgs = self.forward(z)
+ self.generated_imgs = self(z)

# log sampled images
# sample_imgs = self.generated_imgs[:6]
@@ -179,7 +179,7 @@ def on_epoch_end(self):
z = z.cuda(self.last_imgs.device.index)

# log sampled images
- sample_imgs = self.forward(z)
+ sample_imgs = self(z)
grid = torchvision.utils.make_grid(sample_imgs)
self.logger.experiment.add_image(f'generated_images', grid, self.current_epoch)

4 changes: 2 additions & 2 deletions pl_examples/full_examples/imagenet/imagenet_example.py
@@ -42,7 +42,7 @@ def forward(self, x):

def training_step(self, batch, batch_idx):
images, target = batch
- output = self.forward(images)
+ output = self(images)
loss_val = F.cross_entropy(output, target)
acc1, acc5 = self.__accuracy(output, target, topk=(1, 5))

@@ -65,7 +65,7 @@ def training_step(self, batch, batch_idx):

def validation_step(self, batch, batch_idx):
images, target = batch
- output = self.forward(images)
+ output = self(images)
loss_val = F.cross_entropy(output, target)
acc1, acc5 = self.__accuracy(output, target, topk=(1, 5))

2 changes: 1 addition & 1 deletion pl_examples/full_examples/semantic_segmentation/semseg.py
@@ -143,7 +143,7 @@ def training_step(self, batch, batch_nb):
img, mask = batch
img = img.float()
mask = mask.long()
- out = self.forward(img)
+ out = self(img)
loss_val = F.cross_entropy(out, mask, ignore_index=250)
return {'loss': loss_val}

6 changes: 3 additions & 3 deletions pytorch_lightning/core/__init__.py
@@ -82,7 +82,7 @@ def forward(self, x):
def training_step(self, batch, batch_idx):
x, y = batch
- y_hat = self.forward(x)
+ y_hat = self(x)
return {'loss': F.cross_entropy(y_hat, y)}
def train_dataloader(self):
@@ -159,7 +159,7 @@ def configure_optimizers(self):
class LitModel(pl.LightningModule):
def validation_step(self, batch, batch_idx):
x, y = batch
- y_hat = self.forward(x)
+ y_hat = self(x)
return {'val_loss': F.cross_entropy(y_hat, y)}
def validation_epoch_end(self, outputs):
@@ -178,7 +178,7 @@ def val_dataloader(self):
class LitModel(pl.LightningModule):
def test_step(self, batch, batch_idx):
x, y = batch
- y_hat = self.forward(x)
+ y_hat = self(x)
return {'test_loss': F.cross_entropy(y_hat, y)}
def test_epoch_end(self, outputs):
22 changes: 11 additions & 11 deletions pytorch_lightning/core/lightning.py
@@ -97,7 +97,7 @@ def forward(self, *args, **kwargs):
Same as torch.nn.Module.forward(), however in Lightning you want this to define
the operations you want to use for prediction (ie: on a server or as a feature extractor).
- Normally you'd call self.forward() from your training_step() method.
+ Normally you'd call self() from your training_step() method.
This makes it easy to write a complex system for training with the outputs
you'd want in a prediction setting.
@@ -117,7 +117,7 @@ def forward(self, x):
def training_step(self, batch, batch_idx):
x, y = batch
- feature_maps = self.forward(x)
+ feature_maps = self(x)
logits = self.classifier(feature_maps)
# ...
@@ -171,7 +171,7 @@ def training_step(self, batch, batch_idx):
x, y, z = batch
# implement your own
- out = self.forward(x)
+ out = self(x)
loss = self.loss(out, x)
logger_logs = {'training_loss': loss} # optional (MUST ALL BE TENSORS)
@@ -266,7 +266,7 @@ def training_step(self, batch, batch_idx):
# batch is 1/num_gpus big
x, y = batch
- out = self.forward(x)
+ out = self(x)
loss = self.softmax(out)
loss = nce_loss(loss)
return {'loss': loss}
@@ -277,7 +277,7 @@ def training_step(self, batch, batch_idx):
# batch is 1/num_gpus big
x, y = batch
- out = self.forward(x)
+ out = self(x)
return {'out': out}
def training_step_end(self, outputs):
@@ -342,7 +342,7 @@ def validation_step(self, batch, batch_idx):
x, y = batch
# implement your own
- out = self.forward(x)
+ out = self(x)
loss = self.loss(out, y)
# log 6 example images
@@ -413,7 +413,7 @@ def validation_step(self, batch, batch_idx):
# batch is 1/num_gpus big
x, y = batch
- out = self.forward(x)
+ out = self(x)
loss = self.softmax(out)
loss = nce_loss(loss)
return {'loss': loss}
@@ -424,7 +424,7 @@ def validation_step(self, batch, batch_idx):
# batch is 1/num_gpus big
x, y = batch
- out = self.forward(x)
+ out = self(x)
return {'out': out}
def validation_epoch_end(self, outputs):
@@ -564,7 +564,7 @@ def test_step(self, batch, batch_idx):
x, y = batch
# implement your own
- out = self.forward(x)
+ out = self(x)
loss = self.loss(out, y)
# log 6 example images
@@ -636,7 +636,7 @@ def test_step(self, batch, batch_idx):
# batch is 1/num_gpus big
x, y = batch
- out = self.forward(x)
+ out = self(x)
loss = self.softmax(out)
loss = nce_loss(loss)
return {'loss': loss}
@@ -647,7 +647,7 @@ def test_step(self, batch, batch_idx):
# batch is 1/num_gpus big
x, y = batch
- out = self.forward(x)
+ out = self(x)
return {'out': out}
def test_step_end(self, outputs):
4 changes: 2 additions & 2 deletions tests/base/debug.py
@@ -26,12 +26,12 @@ def my_loss(self, y_hat, y):

def training_step(self, batch, batch_idx):
x, y = batch
- y_hat = self.forward(x)
+ y_hat = self(x)
return {'training_loss': self.my_loss(y_hat, y)}

def validation_step(self, batch, batch_idx):
x, y = batch
- y_hat = self.forward(x)
+ y_hat = self(x)
return {'val_loss': self.my_loss(y_hat, y)}

def validation_epoch_end(self, outputs):
16 changes: 8 additions & 8 deletions tests/base/mixins.py
@@ -21,7 +21,7 @@ def validation_step(self, batch, batch_idx, *args, **kwargs):
"""
x, y = batch
x = x.view(x.size(0), -1)
- y_hat = self.forward(x)
+ y_hat = self(x)

loss_val = self.loss(y, y_hat)

@@ -114,7 +114,7 @@ def validation_step(self, batch, batch_idx, dataloader_idx, **kwargs):
"""
x, y = batch
x = x.view(x.size(0), -1)
- y_hat = self.forward(x)
+ y_hat = self(x)

loss_val = self.loss(y, y_hat)

@@ -273,7 +273,7 @@ def test_step(self, batch, batch_idx, *args, **kwargs):
"""
x, y = batch
x = x.view(x.size(0), -1)
- y_hat = self.forward(x)
+ y_hat = self(x)

loss_test = self.loss(y, y_hat)

@@ -360,7 +360,7 @@ def test_step(self, batch, batch_idx, dataloader_idx, **kwargs):
"""
x, y = batch
x = x.view(x.size(0), -1)
- y_hat = self.forward(x)
+ y_hat = self(x)

loss_test = self.loss(y, y_hat)

@@ -413,7 +413,7 @@ def test_step(self, batch, batch_idx, *args, **kwargs):
"""
x, y = batch
x = x.view(x.size(0), -1)
- y_hat = self.forward(x)
+ y_hat = self(x)

loss_test = self.loss(y, y_hat)

@@ -460,7 +460,7 @@ def test_step(self, batch, batch_idx, dataloader_idx, **kwargs):
"""
x, y = batch
x = x.view(x.size(0), -1)
- y_hat = self.forward(x)
+ y_hat = self(x)

loss_test = self.loss(y, y_hat)

@@ -512,7 +512,7 @@ def validation_step(self, batch, batch_idx, *args, **kwargs):
"""
x, y = batch
x = x.view(x.size(0), -1)
- y_hat = self.forward(x)
+ y_hat = self(x)

loss_val = self.loss(y, y_hat)

@@ -558,7 +558,7 @@ def validation_step(self, batch, batch_idx, dataloader_idx, **kwargs):
"""
x, y = batch
x = x.view(x.size(0), -1)
- y_hat = self.forward(x)
+ y_hat = self(x)

loss_val = self.loss(y, y_hat)
