docs cleaning - testcode (#5595)
* testcode - python

* revert

* simple

* testcode @rst

* pl

* fix

* pip

* update

* conf

* conf

* nn.

* typo
Borda authored Jan 26, 2021
1 parent c3587d3 commit f782230
Showing 21 changed files with 366 additions and 367 deletions.
5 changes: 4 additions & 1 deletion .circleci/config.yml
@@ -18,7 +18,10 @@ references:
pyenv global 3.7.3
python --version
pip install -r requirements/docs.txt
- cd docs; make clean; make html --debug --jobs 2 SPHINXOPTS="-W"
+ pip list
+ cd docs
+ make clean
+ make html --jobs 2 SPHINXOPTS="-W"
checkout_ml_testing: &checkout_ml_testing
run:
8 changes: 5 additions & 3 deletions docs/source/conf.py
@@ -25,9 +25,9 @@
PATH_ROOT = os.path.join(PATH_HERE, '..', '..')
sys.path.insert(0, os.path.abspath(PATH_ROOT))

- builtins.__LIGHTNING_SETUP__ = True
-
SPHINX_MOCK_REQUIREMENTS = int(os.environ.get('SPHINX_MOCK_REQUIREMENTS', True))
+ if SPHINX_MOCK_REQUIREMENTS:
+     builtins.__LIGHTNING_SETUP__ = True

import pytorch_lightning # noqa: E402

@@ -360,7 +360,10 @@ def package_list_from_file(file):
import importlib
import os
import torch
from torch import nn
import pytorch_lightning as pl
from pytorch_lightning import LightningDataModule, LightningModule, Trainer
from pytorch_lightning.utilities import (
_NATIVE_AMP_AVAILABLE,
_APEX_AVAILABLE,
@@ -369,6 +372,5 @@
)
_TORCHVISION_AVAILABLE = importlib.util.find_spec("torchvision") is not None
"""
coverage_skip_undoc_in_source = True
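
These imports sit inside a triple-quoted string (note the closing ``"""`` above); in Sphinx projects that string is typically the ``doctest_global_setup`` option of ``sphinx.ext.doctest``, which runs before every ``testcode`` block so individual examples do not need their own imports. A minimal sketch of the pattern, assuming that is what this string feeds (the repository's actual string is longer):

.. code-block:: python

    # conf.py (sketch) -- shared setup executed before every ``.. testcode::`` block
    extensions = ['sphinx.ext.doctest']

    doctest_global_setup = """
    import importlib
    import os
    import torch
    from torch import nn
    import pytorch_lightning as pl
    from pytorch_lightning import LightningDataModule, LightningModule, Trainer
    _TORCHVISION_AVAILABLE = importlib.util.find_spec("torchvision") is not None
    """

If that is the setup, the later hunks in this commit, which drop per-example ``import torch`` lines and shorten ``torch.nn.Linear`` to ``nn.Linear``, rely on exactly these shared names.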
4 changes: 2 additions & 2 deletions docs/source/converting.rst
@@ -24,8 +24,8 @@ Move the model architecture and forward pass to your :ref:`lightning_module`.

def __init__(self):
super().__init__()
- self.layer_1 = torch.nn.Linear(28 * 28, 128)
- self.layer_2 = torch.nn.Linear(128, 10)
+ self.layer_1 = nn.Linear(28 * 28, 128)
+ self.layer_2 = nn.Linear(128, 10)

def forward(self, x):
x = x.view(x.size(0), -1)
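
The ``forward`` body is cut off after the ``view`` call; a minimal sketch of how a two-layer forward pass like this is usually completed (the class name and activation choice are illustrative, only the layer names come from the hunk):

.. code-block:: python

    import torch
    from torch import nn
    from pytorch_lightning import LightningModule

    class LitModel(LightningModule):
        def __init__(self):
            super().__init__()
            self.layer_1 = nn.Linear(28 * 28, 128)
            self.layer_2 = nn.Linear(128, 10)

        def forward(self, x):
            # flatten (batch, 1, 28, 28) images into (batch, 784) vectors
            x = x.view(x.size(0), -1)
            x = torch.relu(self.layer_1(x))
            return self.layer_2(x)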
2 changes: 1 addition & 1 deletion docs/source/early_stopping.rst
@@ -49,7 +49,7 @@ To enable it:
- You can customize the callbacks behaviour by changing its parameters.

- .. code-block:: python
+ .. testcode::

early_stop_callback = EarlyStopping(
monitor='val_accuracy',
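
The ``EarlyStopping`` call is truncated after ``monitor`` here; a sketch of a fully parameterised callback with arguments the callback accepts (the values themselves are illustrative):

.. code-block:: python

    from pytorch_lightning import Trainer
    from pytorch_lightning.callbacks import EarlyStopping

    early_stop_callback = EarlyStopping(
        monitor='val_accuracy',  # logged metric to watch
        min_delta=0.00,          # smallest change that still counts as an improvement
        patience=3,              # number of checks with no improvement before stopping
        verbose=False,
        mode='max',              # 'max' because higher accuracy is better
    )
    trainer = Trainer(callbacks=[early_stop_callback])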
14 changes: 7 additions & 7 deletions docs/source/hyperparameters.rst
@@ -161,9 +161,9 @@ improve readability and reproducibility.
def __init__(self, hparams, *args, **kwargs):
super().__init__()
self.hparams = hparams
- self.layer_1 = torch.nn.Linear(28 * 28, self.hparams.layer_1_dim)
- self.layer_2 = torch.nn.Linear(self.hparams.layer_1_dim, self.hparams.layer_2_dim)
- self.layer_3 = torch.nn.Linear(self.hparams.layer_2_dim, 10)
+ self.layer_1 = nn.Linear(28 * 28, self.hparams.layer_1_dim)
+ self.layer_2 = nn.Linear(self.hparams.layer_1_dim, self.hparams.layer_2_dim)
+ self.layer_3 = nn.Linear(self.hparams.layer_2_dim, 10)
def train_dataloader(self):
return DataLoader(mnist_train, batch_size=self.hparams.batch_size)
@@ -182,9 +182,9 @@ improve readability and reproducibility.
super().__init__()
self.save_hyperparameters(conf)
- self.layer_1 = torch.nn.Linear(28 * 28, self.hparams.layer_1_dim)
- self.layer_2 = torch.nn.Linear(self.hparams.layer_1_dim, self.hparams.layer_2_dim)
- self.layer_3 = torch.nn.Linear(self.hparams.layer_2_dim, 10)
+ self.layer_1 = nn.Linear(28 * 28, self.hparams.layer_1_dim)
+ self.layer_2 = nn.Linear(self.hparams.layer_1_dim, self.hparams.layer_2_dim)
+ self.layer_3 = nn.Linear(self.hparams.layer_2_dim, 10)
conf = OmegaConf.create(...)
model = LitMNIST(conf)
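
The ``OmegaConf.create(...)`` call leaves its contents elided; a sketch with a hypothetical mapping whose keys match the hyperparameters used above:

.. code-block:: python

    from omegaconf import OmegaConf

    # hypothetical values -- any mapping with these keys works with the module above
    conf = OmegaConf.create({"layer_1_dim": 128, "layer_2_dim": 256, "batch_size": 32})
    model = LitMNIST(conf)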
@@ -225,7 +225,7 @@ polluting the ``main.py`` file, the ``LightningModule`` lets you define argument

def __init__(self, layer_1_dim, **kwargs):
super().__init__()
- self.layer_1 = torch.nn.Linear(28 * 28, layer_1_dim)
+ self.layer_1 = nn.Linear(28 * 28, layer_1_dim)
@staticmethod
def add_model_specific_args(parent_parser):
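
The ``add_model_specific_args`` body is cut off above; a minimal sketch of the usual pattern, in which the module extends a parent ``ArgumentParser`` with its own arguments (argument names are illustrative):

.. code-block:: python

    from argparse import ArgumentParser

    from torch import nn
    from pytorch_lightning import LightningModule

    class LitMNIST(LightningModule):
        def __init__(self, layer_1_dim, **kwargs):
            super().__init__()
            self.layer_1 = nn.Linear(28 * 28, layer_1_dim)

        @staticmethod
        def add_model_specific_args(parent_parser):
            parser = ArgumentParser(parents=[parent_parser], add_help=False)
            parser.add_argument('--layer_1_dim', type=int, default=128)
            return parser

    # in the training script: one parser collects program, model and trainer args
    parser = ArgumentParser()
    parser = LitMNIST.add_model_specific_args(parser)
    args = parser.parse_args([])  # [] only so the sketch runs outside a real CLI
    model = LitMNIST(args.layer_1_dim)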
16 changes: 8 additions & 8 deletions docs/source/introduction_guide.rst
@@ -80,9 +80,9 @@ Let's first start with the model. In this case, we'll design a 3-layer neural ne
super().__init__()

# mnist images are (1, 28, 28) (channels, width, height)
- self.layer_1 = torch.nn.Linear(28 * 28, 128)
- self.layer_2 = torch.nn.Linear(128, 256)
- self.layer_3 = torch.nn.Linear(256, 10)
+ self.layer_1 = nn.Linear(28 * 28, 128)
+ self.layer_2 = nn.Linear(128, 256)
+ self.layer_3 = nn.Linear(256, 10)

def forward(self, x):
batch_size, channels, width, height = x.size()
@@ -118,7 +118,7 @@ equivalent to a pure PyTorch Module except it has added functionality. However,
Now we add the training_step which has all our training loop logic

- .. testcode:: python
+ .. testcode::

class LitMNIST(LightningModule):
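
The class body is not shown at this point in the diff; a minimal ``training_step`` sketch in the spirit of this guide (the loss choice assumes ``forward`` returns log-probabilities, which is an assumption here, not something visible in the hunk):

.. code-block:: python

    import torch.nn.functional as F
    from pytorch_lightning import LightningModule

    class LitMNIST(LightningModule):
        def training_step(self, batch, batch_idx):
            x, y = batch
            logits = self(x)              # calls forward()
            loss = F.nll_loss(logits, y)  # assumes forward() ends in log_softmax
            return loss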

@@ -225,7 +225,7 @@ In this case, it's better to group the full definition of a dataset into a `Data
- Val dataloader(s)
- Test dataloader(s)

- .. testcode:: python
+ .. testcode::

class MyDataModule(LightningDataModule):

@@ -420,9 +420,9 @@ For clarity, we'll recall that the full LightningModule now looks like this.
class LitMNIST(LightningModule):
def __init__(self):
super().__init__()
- self.layer_1 = torch.nn.Linear(28 * 28, 128)
- self.layer_2 = torch.nn.Linear(128, 256)
- self.layer_3 = torch.nn.Linear(256, 10)
+ self.layer_1 = nn.Linear(28 * 28, 128)
+ self.layer_2 = nn.Linear(128, 256)
+ self.layer_3 = nn.Linear(256, 10)
def forward(self, x):
batch_size, channels, width, height = x.size()
2 changes: 1 addition & 1 deletion docs/source/lightning_module.rst
@@ -96,7 +96,7 @@ Here are the only required methods.
...
... def __init__(self):
... super().__init__()
- ... self.l1 = torch.nn.Linear(28 * 28, 10)
+ ... self.l1 = nn.Linear(28 * 28, 10)
...
... def forward(self, x):
... return torch.relu(self.l1(x.view(x.size(0), -1)))
19 changes: 11 additions & 8 deletions docs/source/metrics.rst
@@ -141,9 +141,11 @@ This metrics API is independent of PyTorch Lightning. Metrics can directly be us

.. testcode::

+ from pytorch_lightning.metrics import Accuracy
+
def __init__(self):
...
- metric = pl.metrics.Accuracy()
+ metric = Accuracy()
self.train_acc = metric.clone()
self.val_acc = metric.clone()
self.test_acc = metric.clone()
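
A sketch of how such cloned metrics are then typically updated and logged from the step methods (the step bodies below are illustrative, only the ``clone`` pattern comes from the hunk):

.. code-block:: python

    from pytorch_lightning import LightningModule
    from pytorch_lightning.metrics import Accuracy

    class LitClassifier(LightningModule):
        def __init__(self):
            super().__init__()
            metric = Accuracy()
            self.train_acc = metric.clone()
            self.val_acc = metric.clone()

        def training_step(self, batch, batch_idx):
            x, y = batch
            preds = self(x)
            ...
            # updating the metric and logging the object lets Lightning decide
            # when to compute and reset it
            self.train_acc(preds, y)
            self.log('train_acc', self.train_acc, on_step=True, on_epoch=False)

        def validation_step(self, batch, batch_idx):
            x, y = batch
            preds = self(x)
            self.val_acc(preds, y)
            self.log('val_acc', self.val_acc, on_step=False, on_epoch=True)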
@@ -164,7 +166,6 @@ be moved to the same device as the input of the metric:

.. code-block:: python
import torch
from pytorch_lightning.metrics import Accuracy
target = torch.tensor([1, 1, 0, 0], device=torch.device("cuda", 0))
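
The example is truncated above; a sketch of the complete pattern — a metric is an ``nn.Module``, so it is moved with ``.to()`` like any other module (the CPU fallback is added here so the sketch also runs without a GPU):

.. code-block:: python

    import torch
    from pytorch_lightning.metrics import Accuracy

    device = torch.device("cuda", 0) if torch.cuda.is_available() else torch.device("cpu")

    target = torch.tensor([1, 1, 0, 0], device=device)
    preds = torch.tensor([0, 1, 0, 0], device=device)

    # metric states live on whatever device the metric itself has been moved to
    accuracy = Accuracy().to(device)
    acc = accuracy(preds, target)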
@@ -186,13 +187,15 @@ as child modules. Instead of ``list`` use :class:`~torch.nn.ModuleList` and inst

.. testcode::

+ from pytorch_lightning.metrics import Accuracy
+
class MyModule(LightningModule):
def __init__(self):
...
# valid ways metrics will be identified as child modules
- self.metric1 = pl.metrics.Accuracy()
- self.metric2 = torch.nn.ModuleList(pl.metrics.Accuracy())
- self.metric3 = torch.nn.ModuleDict({'accuracy': Accuracy()})
+ self.metric1 = Accuracy()
+ self.metric2 = nn.ModuleList(Accuracy())
+ self.metric3 = nn.ModuleDict({'accuracy': Accuracy()})

def training_step(self, batch, batch_idx):
# all metrics will be on the same device as the input batch
@@ -222,7 +225,7 @@ from the base ``Metric`` class.

Example implementation:

- .. code-block:: python
+ .. testcode::

from pytorch_lightning.metrics import Metric

@@ -281,8 +284,8 @@ Example:
.. testoutput::
:options: +NORMALIZE_WHITESPACE

- {'Accuracy': tensor(0.1250),
- 'Precision': tensor(0.0667),
+ {'Accuracy': tensor(0.1250),
+ 'Precision': tensor(0.0667),
'Recall': tensor(0.1111)}

Similarly it can also reduce the amount of code required to log multiple metrics
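
The surrounding text suggests the example bundles several metrics and logs them in one call; whether it uses a dedicated collection class is not visible in the hunk, so the sketch below only shows the plain pattern of computing a few metrics and logging them together with ``log_dict`` (class and key names are illustrative):

.. code-block:: python

    from pytorch_lightning import LightningModule
    from pytorch_lightning.metrics import Accuracy, Precision, Recall

    class LitClassifier(LightningModule):
        def __init__(self):
            super().__init__()
            self.val_accuracy = Accuracy()
            self.val_precision = Precision(num_classes=3)
            self.val_recall = Recall(num_classes=3)

        def validation_step(self, batch, batch_idx):
            x, y = batch
            preds = self(x)
            # every metric sees the same predictions; one call logs them all
            self.log_dict({
                'val_acc': self.val_accuracy(preds, y),
                'val_precision': self.val_precision(preds, y),
                'val_recall': self.val_recall(preds, y),
            })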
2 changes: 1 addition & 1 deletion docs/source/multi_gpu.rst
@@ -698,7 +698,7 @@ This should be kept within the ``sequential_module`` variable within your ``Ligh
class MyModel(LightningModule):
def __init__(self):
...
- self.sequential_module = torch.nn.Sequential(my_layers)
+ self.sequential_module = nn.Sequential(my_layers)
# Split my module across 4 gpus, one layer each
model = MyModel()