From 8a88c6a7532b8363c17f038152d2518981058498 Mon Sep 17 00:00:00 2001
From: Jirka Borovec
Date: Thu, 18 Feb 2021 16:25:33 +0100
Subject: [PATCH] fix docs links

---
 .github/stale.yml                     | 2 +-
 README.md                             | 6 +++---
 notebooks/01-mnist-hello-world.ipynb  | 4 ++--
 notebooks/02-datamodules.ipynb        | 2 +-
 notebooks/03-basic-gan.ipynb          | 2 +-
 notebooks/06-mnist-tpu-training.ipynb | 4 ++--
 notebooks/07-cifar10-baseline.ipynb   | 2 +-
 7 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/.github/stale.yml b/.github/stale.yml
index f204e5b3d26ad..84049394d3aab 100644
--- a/.github/stale.yml
+++ b/.github/stale.yml
@@ -29,7 +29,7 @@ pulls:
   markComment: >
     This pull request has been automatically marked as stale because it has not had recent activity.
     It will be closed in 7 days if no further activity occurs. If you need further help see our docs:
-    https://pytorch-lightning.readthedocs.io/en/latest/CONTRIBUTING.html#pull-request
+    https://pytorch-lightning.readthedocs.io/en/latest/generated/CONTRIBUTING.html#pull-request
     or ask the assistance of a core contributor here or on Slack. Thank you for your contributions.

 # Comment to post when closing a stale issue. Set to `false` to disable
diff --git a/README.md b/README.md
index dc23787cf010a..1e7eb5fa157bc 100644
--- a/README.md
+++ b/README.md
@@ -63,7 +63,7 @@ Lightning forces the following structure to your code which makes it reusable an

 Once you do this, you can train on multiple-GPUs, TPUs, CPUs and even in 16-bit precision without changing your code!

-Get started with our [2 step guide](https://pytorch-lightning.readthedocs.io/en/stable/new-project.html)
+Get started with our [2 step guide](https://pytorch-lightning.readthedocs.io/en/latest/starter/new-project.html)

 ---
@@ -219,7 +219,7 @@ trainer.fit(autoencoder, DataLoader(train), DataLoader(val))
 ```

 ## Advanced features
-Lightning has over [40+ advanced features](https://pytorch-lightning.readthedocs.io/en/stable/trainer.html#trainer-flags) designed for professional AI research at scale.
+Lightning has over [40+ advanced features](https://pytorch-lightning.readthedocs.io/en/latest/common/trainer.html#trainer-flags) designed for professional AI research at scale.

 Here are some examples:
@@ -379,7 +379,7 @@ class LitAutoEncoder(pl.LightningModule):

 ## Community
 The lightning community is maintained by
-- [16 core contributors](https://pytorch-lightning.readthedocs.io/en/latest/governance.html) who are all a mix of professional engineers, Research Scientists, and Ph.D. students from top AI labs.
+- [10+ core contributors](https://pytorch-lightning.readthedocs.io/en/latest/governance.html) who are all a mix of professional engineers, Research Scientists, and Ph.D. students from top AI labs.
 - 400+ community contributors.

 Lightning is also part of the [PyTorch ecosystem](https://pytorch.org/ecosystem/) which requires projects to have solid testing, documentation and support.
diff --git a/notebooks/01-mnist-hello-world.ipynb b/notebooks/01-mnist-hello-world.ipynb
index 9f39693a1663a..cdcc6cd28a486 100644
--- a/notebooks/01-mnist-hello-world.ipynb
+++ b/notebooks/01-mnist-hello-world.ipynb
@@ -176,13 +176,13 @@
     " - This is where we can download the dataset. We point to our desired dataset and ask torchvision's `MNIST` dataset class to download if the dataset isn't found there.\n",
     " - **Note we do not make any state assignments in this function** (i.e. `self.something = ...`)\n",
     "\n",
-    "2. [setup(stage)](https://pytorch-lightning.readthedocs.io/en/latest/lightning-module.html#setup) ⚙️\n",
+    "2. [setup(stage)](https://pytorch-lightning.readthedocs.io/en/latest/common/lightning-module.html#setup) ⚙️\n",
     " - Loads in data from file and prepares PyTorch tensor datasets for each split (train, val, test). \n",
     " - Setup expects a 'stage' arg which is used to separate logic for 'fit' and 'test'.\n",
     " - If you don't mind loading all your datasets at once, you can set up a condition to allow for both 'fit' related setup and 'test' related setup to run whenever `None` is passed to `stage` (or ignore it altogether and exclude any conditionals).\n",
     " - **Note this runs across all GPUs and it *is* safe to make state assignments here**\n",
     "\n",
-    "3. [x_dataloader()](https://pytorch-lightning.readthedocs.io/en/latest/lightning-module.html#data-hooks) ♻️\n",
+    "3. [x_dataloader()](https://pytorch-lightning.readthedocs.io/en/latest/common/lightning-module.html#data-hooks) ♻️\n",
     " - `train_dataloader()`, `val_dataloader()`, and `test_dataloader()` all return PyTorch `DataLoader` instances that are created by wrapping their respective datasets that we prepared in `setup()`"
    ]
   },
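For readers skimming the patch, the notebook cell above documents the three data hooks whose links were fixed. A minimal sketch of how they fit together, assuming a `LightningDataModule` with torchvision's MNIST; the class name, data path, batch size, and split sizes are illustrative, not taken from the patch:

```python
import pytorch_lightning as pl
from torch.utils.data import DataLoader, random_split
from torchvision import transforms
from torchvision.datasets import MNIST


class MNISTDataModule(pl.LightningDataModule):
    def prepare_data(self):
        # Download only; runs once, so make no state assignments here.
        MNIST("./data", train=True, download=True)
        MNIST("./data", train=False, download=True)

    def setup(self, stage=None):
        # Runs on every process/GPU; safe to assign state here.
        transform = transforms.ToTensor()
        if stage == "fit" or stage is None:
            full = MNIST("./data", train=True, transform=transform)
            self.mnist_train, self.mnist_val = random_split(full, [55000, 5000])
        if stage == "test" or stage is None:
            self.mnist_test = MNIST("./data", train=False, transform=transform)

    def train_dataloader(self):
        return DataLoader(self.mnist_train, batch_size=32)

    def val_dataloader(self):
        return DataLoader(self.mnist_val, batch_size=32)

    def test_dataloader(self):
        return DataLoader(self.mnist_test, batch_size=32)
```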
diff --git a/notebooks/02-datamodules.ipynb b/notebooks/02-datamodules.ipynb
index b5e582b25c365..5438cd5dc5c2f 100644
--- a/notebooks/02-datamodules.ipynb
+++ b/notebooks/02-datamodules.ipynb
@@ -23,7 +23,7 @@
     "\n",
     "This notebook will walk you through how to start using Datamodules.\n",
     "\n",
-    "The most up to date documentation on datamodules can be found [here](https://pytorch-lightning.readthedocs.io/en/latest/datamodules.html).\n",
+    "The most up to date documentation on datamodules can be found [here](https://pytorch-lightning.readthedocs.io/en/latest/extensions/datamodules.html).\n",
     "\n",
     "---\n",
     "\n",
diff --git a/notebooks/03-basic-gan.ipynb b/notebooks/03-basic-gan.ipynb
index 2760019b3d26b..5cee735842a08 100644
--- a/notebooks/03-basic-gan.ipynb
+++ b/notebooks/03-basic-gan.ipynb
@@ -91,7 +91,7 @@
    "source": [
     "### MNIST DataModule\n",
     "\n",
-    "Below, we define a DataModule for the MNIST Dataset. To learn more about DataModules, check out our tutorial on them or see the [latest docs](https://pytorch-lightning.readthedocs.io/en/latest/datamodules.html)."
+    "Below, we define a DataModule for the MNIST Dataset. To learn more about DataModules, check out our tutorial on them or see the [latest docs](https://pytorch-lightning.readthedocs.io/en/latest/extensions/datamodules.html)."
    ]
   },
   {
diff --git a/notebooks/06-mnist-tpu-training.ipynb b/notebooks/06-mnist-tpu-training.ipynb
index 4b984b0437d80..359d262dfd880 100644
--- a/notebooks/06-mnist-tpu-training.ipynb
+++ b/notebooks/06-mnist-tpu-training.ipynb
@@ -33,7 +33,7 @@
     "\n",
     "In this notebook, we'll train a model on TPUs. Changing one line of code is all you need to do that.\n",
     "\n",
-    "The most up to documentation related to TPU training can be found [here](https://pytorch-lightning.readthedocs.io/en/latest/tpu.html).\n",
+    "The most up to date documentation related to TPU training can be found [here](https://pytorch-lightning.readthedocs.io/en/latest/advanced/tpu.html).\n",
     "\n",
     "---\n",
     "\n",
@@ -114,7 +114,7 @@
    "source": [
     "### Defining The `MNISTDataModule`\n",
     "\n",
-    "Below we define `MNISTDataModule`. You can learn more about datamodules in the [docs](https://pytorch-lightning.readthedocs.io/en/latest/datamodules.html) and the [datamodule notebook](https://github.com/PyTorchLightning/pytorch-lightning/blob/master/notebooks/02-datamodules.ipynb)."
+    "Below we define `MNISTDataModule`. You can learn more about datamodules in the [docs](https://pytorch-lightning.readthedocs.io/en/latest/extensions/datamodules.html) and the [datamodule notebook](https://github.com/PyTorchLightning/pytorch-lightning/blob/master/notebooks/02-datamodules.ipynb)."
    ]
   },
   {
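The "one line of code" the TPU notebook refers to is a `Trainer` flag. A minimal sketch, assuming the `tpu_cores` argument from the Lightning 1.x releases this patch targets; `LitMNIST` is a hypothetical stand-in for the notebook's model, and the `fit` call is commented out because it only runs on an actual TPU host:

```python
import torch
from torch import nn
from torch.nn import functional as F
import pytorch_lightning as pl


class LitMNIST(pl.LightningModule):
    def __init__(self):
        super().__init__()
        # Tiny classifier as a placeholder for the notebook's real model.
        self.layer = nn.Linear(28 * 28, 10)

    def forward(self, x):
        return self.layer(x.view(x.size(0), -1))

    def training_step(self, batch, batch_idx):
        x, y = batch
        return F.cross_entropy(self(x), y)

    def configure_optimizers(self):
        return torch.optim.Adam(self.parameters(), lr=1e-3)


# The single TPU-specific change: ask the Trainer for 8 TPU cores.
# Drop the flag and the same script trains on CPU/GPU instead.
trainer = pl.Trainer(tpu_cores=8, max_epochs=3)
# trainer.fit(LitMNIST(), datamodule=MNISTDataModule())  # datamodule as sketched earlier
```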
diff --git a/notebooks/07-cifar10-baseline.ipynb b/notebooks/07-cifar10-baseline.ipynb
index 2fd038c53f722..9f3209a8bbc02 100644
--- a/notebooks/07-cifar10-baseline.ipynb
+++ b/notebooks/07-cifar10-baseline.ipynb
@@ -185,7 +185,7 @@
   },
   "source": [
     "### Lightning Module\n",
-    "Check out the [`configure_optimizers`](https://pytorch-lightning.readthedocs.io/en/stable/lightning_module.html#configure-optimizers) method to use custom Learning Rate schedulers. The OneCycleLR with SGD will get you to around 92-93% accuracy in 20-30 epochs and 93-94% accuracy in 40-50 epochs. Feel free to experiment with different LR schedules from https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate"
+    "Check out the [`configure_optimizers`](https://pytorch-lightning.readthedocs.io/en/stable/common/lightning_module.html#configure-optimizers) method to use custom Learning Rate schedulers. The OneCycleLR with SGD will get you to around 92-93% accuracy in 20-30 epochs and 93-94% accuracy in 40-50 epochs. Feel free to experiment with different LR schedules from https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate"
    ]
   },
  {
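The cell above links to `configure_optimizers` without showing it. A hedged sketch of the SGD + OneCycleLR pairing it describes; the model, learning rates, batch size, and the 45,000-sample train split are illustrative assumptions, not part of the patch:

```python
import torch
from torch.optim.lr_scheduler import OneCycleLR
import pytorch_lightning as pl


class LitCifar(pl.LightningModule):
    def __init__(self):
        super().__init__()
        # Stand-in for the notebook's ResNet backbone.
        self.model = torch.nn.Linear(3 * 32 * 32, 10)

    def configure_optimizers(self):
        optimizer = torch.optim.SGD(
            self.parameters(), lr=0.05, momentum=0.9, weight_decay=5e-4
        )
        # OneCycleLR must step once per batch, hence interval="step".
        steps_per_epoch = 45000 // 256  # illustrative: train-split size / batch size
        scheduler = OneCycleLR(
            optimizer,
            max_lr=0.1,
            epochs=self.trainer.max_epochs,
            steps_per_epoch=steps_per_epoch,
        )
        return {
            "optimizer": optimizer,
            "lr_scheduler": {"scheduler": scheduler, "interval": "step"},
        }
```

Stepping the scheduler per batch rather than per epoch is what lets OneCycleLR sweep its full learning-rate schedule, which is the reason the notebook reaches its quoted accuracy in relatively few epochs.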