diff --git a/.azure/ipu-tests.yml b/.azure/ipu-tests.yml
index 972bf1e95a06b..4ffdb9520c9ca 100644
--- a/.azure/ipu-tests.yml
+++ b/.azure/ipu-tests.yml
@@ -26,7 +26,8 @@ pr:
variables:
- name: poplar_sdk
- value: "poplar_sdk-ubuntu_20_04-2.3.1+793-89796d462d"
+ # https://docs.graphcore.ai/projects/poptorch-user-guide/en/latest/installation.html#version-compatibility
+ value: "poplar_sdk-ubuntu_20_04-3.0.0+1145-1b114aac3a"
jobs:
- job: testing
diff --git a/.github/checkgroup.yml b/.github/checkgroup.yml
index b3b0ac8e8a7e4..3b2a3df69efb6 100644
--- a/.github/checkgroup.yml
+++ b/.github/checkgroup.yml
@@ -29,25 +29,19 @@ subprojects:
- "pl-cpu (macOS-11, pytorch, 3.8, 1.10)"
- "pl-cpu (macOS-11, pytorch, 3.9, 1.11)"
- "pl-cpu (macOS-11, pytorch, 3.10, 1.12)"
- - "pl-cpu (macOS-11, pytorch, 3.7, 1.9, oldest)"
- "pl-cpu (macOS-11, pytorch, 3.10, 1.13, pre)"
- "pl-cpu (ubuntu-20.04, pytorch, 3.8, 1.10)"
- "pl-cpu (ubuntu-20.04, pytorch, 3.9, 1.11)"
- "pl-cpu (ubuntu-20.04, pytorch, 3.10, 1.11)"
- "pl-cpu (ubuntu-20.04, pytorch, 3.10, 1.12)"
- - "pl-cpu (ubuntu-20.04, pytorch, 3.7, 1.9, oldest)"
- "pl-cpu (ubuntu-20.04, pytorch, 3.9, 1.13, pre)"
- "pl-cpu (windows-2022, pytorch, 3.9, 1.11)"
- "pl-cpu (windows-2022, pytorch, 3.10, 1.11)"
- "pl-cpu (windows-2022, pytorch, 3.10, 1.12)"
- - "pl-cpu (windows-2022, pytorch, 3.7, 1.9, oldest)"
- "pl-cpu (windows-2022, pytorch, 3.8, 1.13, pre)"
- "pl-cpu (macOS-11, lightning, 3.10, 1.12)"
- #- "pl-cpu (macOS-11, lightning, 3.7, 1.9, oldest)"
- "pl-cpu (ubuntu-20.04, lightning, 3.10, 1.12)"
- #- "pl-cpu (ubuntu-20.04, lightning, 3.7, 1.9, oldest)"
- "pl-cpu (windows-2022, lightning, 3.10, 1.12)"
- #- "pl-cpu (windows-2022, lightning, 3.7, 1.9, oldest)"
- "pytorch-lightning (GPUs)"
- "pytorch-lightning (HPUs)"
- "pytorch-lightning (IPUs)"
@@ -64,25 +58,19 @@ subprojects:
- "pl-cpu (macOS-11, pytorch, 3.8, 1.10)"
- "pl-cpu (macOS-11, pytorch, 3.9, 1.11)"
- "pl-cpu (macOS-11, pytorch, 3.10, 1.12)"
- - "pl-cpu (macOS-11, pytorch, 3.7, 1.9, oldest)"
- "pl-cpu (macOS-11, pytorch, 3.10, 1.13, pre)"
- "pl-cpu (ubuntu-20.04, pytorch, 3.8, 1.10)"
- "pl-cpu (ubuntu-20.04, pytorch, 3.9, 1.11)"
- "pl-cpu (ubuntu-20.04, pytorch, 3.10, 1.11)"
- "pl-cpu (ubuntu-20.04, pytorch, 3.10, 1.12)"
- - "pl-cpu (ubuntu-20.04, pytorch, 3.7, 1.9, oldest)"
- "pl-cpu (ubuntu-20.04, pytorch, 3.9, 1.13, pre)"
- "pl-cpu (windows-2022, pytorch, 3.9, 1.11)"
- "pl-cpu (windows-2022, pytorch, 3.10, 1.11)"
- "pl-cpu (windows-2022, pytorch, 3.10, 1.12)"
- - "pl-cpu (windows-2022, pytorch, 3.7, 1.9, oldest)"
- "pl-cpu (windows-2022, pytorch, 3.8, 1.13, pre)"
- "pl-cpu (macOS-11, lightning, 3.10, 1.12)"
- #- "pl-cpu (macOS-11, lightning, 3.7, 1.9, oldest)"
- "pl-cpu (ubuntu-20.04, lightning, 3.10, 1.12)"
- #- "pl-cpu (ubuntu-20.04, lightning, 3.7, 1.9, oldest)"
- "pl-cpu (windows-2022, lightning, 3.10, 1.12)"
- #- "pl-cpu (windows-2022, lightning, 3.7, 1.9, oldest)"
- id: "pytorch_lightning: Slow workflow"
paths:
@@ -144,7 +132,7 @@ subprojects:
- "build-cuda (3.9, 1.11, 11.3.1)"
- "build-cuda (3.9, 1.12, 11.6.1)"
- "build-hpu (1.5.0, 1.11.0)"
- - "build-ipu (3.9, 1.9)"
+ - "build-ipu (3.9, 1.10)"
- "build-NGC"
- "build-pl (3.9, 1.10, 11.3.1)"
- "build-pl (3.9, 1.11, 11.3.1)"
@@ -164,16 +152,12 @@ subprojects:
- "lite-cpu (macOS-11, lite, 3.8, 1.10)"
- "lite-cpu (macOS-11, lite, 3.10, 1.12)"
- "lite-cpu (macOS-11, lite, 3.10, 1.13, pre)"
- - "lite-cpu (macOS-11, lite, 3.7, 1.9, oldest)"
- "lite-cpu (ubuntu-20.04, lite, 3.8, 1.11)"
- "lite-cpu (ubuntu-20.04, lite, 3.10, 1.12)"
- - "lite-cpu (ubuntu-20.04, lite, 3.7, 1.9, oldest)"
- "lite-cpu (ubuntu-20.04, lite, 3.9, 1.13, pre)"
- - "lite-cpu (windows-2022, lite, 3.8, 1.9)"
- "lite-cpu (windows-2022, lite, 3.9, 1.10)"
- "lite-cpu (windows-2022, lite, 3.10, 1.11)"
- "lite-cpu (windows-2022, lite, 3.10, 1.12)"
- - "lite-cpu (windows-2022, lite, 3.7, 1.9, oldest)"
- "lite-cpu (windows-2022, lite, 3.8, 1.13, pre)"
- "lite-cpu (macOS-11, lightning, 3.8, 1.12)"
- "lite-cpu (ubuntu-20.04, lightning, 3.8, 1.12)"
@@ -183,25 +167,19 @@ subprojects:
- "pl-cpu (macOS-11, pytorch, 3.8, 1.10)"
- "pl-cpu (macOS-11, pytorch, 3.9, 1.11)"
- "pl-cpu (macOS-11, pytorch, 3.10, 1.12)"
- - "pl-cpu (macOS-11, pytorch, 3.7, 1.9, oldest)"
- "pl-cpu (macOS-11, pytorch, 3.10, 1.13, pre)"
- "pl-cpu (ubuntu-20.04, pytorch, 3.8, 1.10)"
- "pl-cpu (ubuntu-20.04, pytorch, 3.9, 1.11)"
- "pl-cpu (ubuntu-20.04, pytorch, 3.10, 1.11)"
- "pl-cpu (ubuntu-20.04, pytorch, 3.10, 1.12)"
- - "pl-cpu (ubuntu-20.04, pytorch, 3.7, 1.9, oldest)"
- "pl-cpu (ubuntu-20.04, pytorch, 3.9, 1.13, pre)"
- "pl-cpu (windows-2022, pytorch, 3.9, 1.11)"
- "pl-cpu (windows-2022, pytorch, 3.10, 1.11)"
- "pl-cpu (windows-2022, pytorch, 3.10, 1.12)"
- - "pl-cpu (windows-2022, pytorch, 3.7, 1.9, oldest)"
- "pl-cpu (windows-2022, pytorch, 3.8, 1.13, pre)"
- "pl-cpu (macOS-11, lightning, 3.10, 1.12)"
- #- "pl-cpu (macOS-11, lightning, 3.7, 1.9, oldest)"
- "pl-cpu (ubuntu-20.04, lightning, 3.10, 1.12)"
- #- "pl-cpu (ubuntu-20.04, lightning, 3.7, 1.9, oldest)"
- "pl-cpu (windows-2022, lightning, 3.10, 1.12)"
- #- "pl-cpu (windows-2022, lightning, 3.7, 1.9, oldest)"
- "pytorch-lightning (GPUs)"
- "pytorch-lightning (HPUs)"
- "pytorch-lightning (IPUs)"
@@ -218,16 +196,12 @@ subprojects:
- "lite-cpu (macOS-11, lite, 3.8, 1.10)"
- "lite-cpu (macOS-11, lite, 3.10, 1.12)"
- "lite-cpu (macOS-11, lite, 3.10, 1.13, pre)"
- - "lite-cpu (macOS-11, lite, 3.7, 1.9, oldest)"
- "lite-cpu (ubuntu-20.04, lite, 3.8, 1.11)"
- "lite-cpu (ubuntu-20.04, lite, 3.10, 1.12)"
- - "lite-cpu (ubuntu-20.04, lite, 3.7, 1.9, oldest)"
- "lite-cpu (ubuntu-20.04, lite, 3.9, 1.13, pre)"
- - "lite-cpu (windows-2022, lite, 3.8, 1.9)"
- "lite-cpu (windows-2022, lite, 3.9, 1.10)"
- "lite-cpu (windows-2022, lite, 3.10, 1.11)"
- "lite-cpu (windows-2022, lite, 3.10, 1.12)"
- - "lite-cpu (windows-2022, lite, 3.7, 1.9, oldest)"
- "lite-cpu (windows-2022, lite, 3.8, 1.13, pre)"
- "lite-cpu (macOS-11, lightning, 3.8, 1.12)"
- "lite-cpu (ubuntu-20.04, lightning, 3.8, 1.12)"
diff --git a/.github/workflows/ci-lite-tests.yml b/.github/workflows/ci-lite-tests.yml
index cfd9c9a0b4fb7..49a967526aa2e 100644
--- a/.github/workflows/ci-lite-tests.yml
+++ b/.github/workflows/ci-lite-tests.yml
@@ -36,21 +36,18 @@ jobs:
matrix:
include:
# assign python and pytorch version combinations to operating systems (arbitrarily)
- # note: there's no distribution of Torch==1.9 for Python>=3.9 or torch==1.10 for Python>=3.10
+ # note: there's no distribution of torch==1.10 for Python>=3.10
- {os: "macOS-11", pkg-name: "lite", python-version: "3.9", pytorch-version: "1.11"}
- {os: "macOS-11", pkg-name: "lite", python-version: "3.8", pytorch-version: "1.10"}
- {os: "windows-2022", pkg-name: "lite", python-version: "3.10", pytorch-version: "1.11"}
- {os: "windows-2022", pkg-name: "lite", python-version: "3.9", pytorch-version: "1.10"}
- - {os: "windows-2022", pkg-name: "lite", python-version: "3.8", pytorch-version: "1.9"}
- {os: "ubuntu-20.04", pkg-name: "lite", python-version: "3.8", pytorch-version: "1.11"}
# only run PyTorch latest with Python latest
- {os: "macOS-11", pkg-name: "lite", python-version: "3.10", pytorch-version: "1.12"}
- {os: "ubuntu-20.04", pkg-name: "lite", python-version: "3.10", pytorch-version: "1.12"}
- {os: "windows-2022", pkg-name: "lite", python-version: "3.10", pytorch-version: "1.12"}
# "oldest" versions tests, only on minimum Python
- - {os: "macOS-11", pkg-name: "lite", python-version: "3.7", pytorch-version: "1.9", requires: "oldest"}
- - {os: "ubuntu-20.04", pkg-name: "lite", python-version: "3.7", pytorch-version: "1.9", requires: "oldest"}
- - {os: "windows-2022", pkg-name: "lite", python-version: "3.7", pytorch-version: "1.9", requires: "oldest"}
+ # TODO: add back with 1.13
# release-candidate tests, mixed Python versions
- {os: "macOS-11", pkg-name: "lite", python-version: "3.10", pytorch-version: "1.13", release: "pre"}
- {os: "ubuntu-20.04", pkg-name: "lite", python-version: "3.9", pytorch-version: "1.13", release: "pre"}
diff --git a/.github/workflows/ci-pytorch-dockers.yml b/.github/workflows/ci-pytorch-dockers.yml
index 4682dd5b3b5ac..6ac2f5d280a42 100644
--- a/.github/workflows/ci-pytorch-dockers.yml
+++ b/.github/workflows/ci-pytorch-dockers.yml
@@ -136,7 +136,7 @@ jobs:
matrix:
include:
# the config used in 'dockers/ci-runner-ipu/Dockerfile'
- - {python_version: "3.9", pytorch_version: "1.9"}
+ - {python_version: "3.9", pytorch_version: "1.10"}
steps:
- uses: actions/checkout@v3
- uses: docker/setup-buildx-action@v2
diff --git a/.github/workflows/ci-pytorch-tests.yml b/.github/workflows/ci-pytorch-tests.yml
index 91d0a73452e1c..1b3aba144ad7e 100644
--- a/.github/workflows/ci-pytorch-tests.yml
+++ b/.github/workflows/ci-pytorch-tests.yml
@@ -40,7 +40,7 @@ jobs:
matrix:
include:
# assign python and pytorch version combinations to operating systems (arbitrarily)
- # note: there's no distribution of Torch==1.9 for Python>=3.9 or torch==1.10 for Python>=3.10
+ # note: there's no distribution of torch==1.10 for Python>=3.10
- {os: "macOS-11", pkg-name: "pytorch", python-version: "3.9", pytorch-version: "1.11"}
- {os: "macOS-11", pkg-name: "pytorch", python-version: "3.8", pytorch-version: "1.10"}
- {os: "ubuntu-20.04", pkg-name: "pytorch", python-version: "3.8", pytorch-version: "1.10"}
@@ -57,12 +57,7 @@ jobs:
- {os: "windows-2022", pkg-name: "pytorch", python-version: "3.10", pytorch-version: "1.12"}
- {os: "windows-2022", pkg-name: "lightning", python-version: "3.10", pytorch-version: "1.12"}
# "oldest" versions tests, only on minimum Python
- - {os: "macOS-11", pkg-name: "pytorch", python-version: "3.7", pytorch-version: "1.9", requires: "oldest"}
- - {os: "macOS-11", pkg-name: "lightning", python-version: "3.7", pytorch-version: "1.9", requires: "oldest"}
- - {os: "ubuntu-20.04", pkg-name: "pytorch", python-version: "3.7", pytorch-version: "1.9", requires: "oldest"}
- - {os: "ubuntu-20.04", pkg-name: "lightning", python-version: "3.7", pytorch-version: "1.9", requires: "oldest"}
- - {os: "windows-2022", pkg-name: "pytorch", python-version: "3.7", pytorch-version: "1.9", requires: "oldest"}
- - {os: "windows-2022", pkg-name: "lightning", python-version: "3.7", pytorch-version: "1.9", requires: "oldest"}
+ # TODO: add back with 1.13
# release-candidate tests, mixed Python versions
- {os: "macOS-11", pkg-name: "pytorch", python-version: "3.10", pytorch-version: "1.13", release: "pre"}
- {os: "ubuntu-20.04", pkg-name: "pytorch", python-version: "3.9", pytorch-version: "1.13", release: "pre"}
diff --git a/README.md b/README.md
index 66f1f28be4275..e862c718d12a4 100644
--- a/README.md
+++ b/README.md
@@ -89,15 +89,15 @@ Lightning is rigorously tested across multiple CPUs, GPUs, TPUs, IPUs, and HPUs
-| System / PyTorch ver. | 1.9 | 1.10 | 1.12 (latest) |
-| :------------------------: | :-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: |
-| Linux py3.7 \[GPUs\*\*\] | - | - | - |
-| Linux py3.7 \[TPUs\*\*\*\] | [![Test](https://github.com/Lightning-AI/lightning/actions/workflows/tpu-tests.yml/badge.svg?branch=master&event=push)](https://github.com/Lightning-AI/lightning/actions/workflows/tpu-tests.yml) | - | - |
-| Linux py3.8 \[IPUs\] | [![Build Status]()](https://dev.azure.com/Lightning-AI/lightning/_build/latest?definitionId=25&branchName=master) | - | - |
-| Linux py3.8 \[HPUs\] | - | [![Build Status]()](https://dev.azure.com/Lightning-AI/lightning/_build/latest?definitionId=26&branchName=master) | - |
-| Linux py3.{7,9} | - | - | [![Test](https://github.com/Lightning-AI/lightning/actions/workflows/ci-pytorch-tests.yml/badge.svg?branch=master&event=push)](https://github.com/Lightning-AI/lightning/actions/workflows/ci-pytorch-tests.yml) |
-| OSX py3.{7,9} | - | - | [![Test](https://github.com/Lightning-AI/lightning/actions/workflows/ci-pytorch-tests.yml/badge.svg?branch=master&event=push)](https://github.com/Lightning-AI/lightning/actions/workflows/ci-pytorch-tests.yml) |
-| Windows py3.{7,9} | - | - | [![Test](https://github.com/Lightning-AI/lightning/actions/workflows/ci-pytorch-tests.yml/badge.svg?branch=master&event=push)](https://github.com/Lightning-AI/lightning/actions/workflows/ci-pytorch-tests.yml) |
+| System / PyTorch ver. | 1.10 | 1.12 |
+| :------------------------: | :-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: |
+| Linux py3.7 \[GPUs\*\*\] | - | - |
+| Linux py3.7 \[TPUs\*\*\*\] | - | - |
+| Linux py3.8 \[IPUs\] | - | - |
+| Linux py3.8 \[HPUs\] | [![Build Status]()](https://dev.azure.com/Lightning-AI/lightning/_build/latest?definitionId=26&branchName=master) | - |
+| Linux py3.{7,9} | - | [![Test](https://github.com/Lightning-AI/lightning/actions/workflows/ci-pytorch-tests.yml/badge.svg?branch=master&event=push)](https://github.com/Lightning-AI/lightning/actions/workflows/ci-pytorch-tests.yml) |
+| OSX py3.{7,9} | - | [![Test](https://github.com/Lightning-AI/lightning/actions/workflows/ci-pytorch-tests.yml/badge.svg?branch=master&event=push)](https://github.com/Lightning-AI/lightning/actions/workflows/ci-pytorch-tests.yml) |
+| Windows py3.{7,9} | - | [![Test](https://github.com/Lightning-AI/lightning/actions/workflows/ci-pytorch-tests.yml/badge.svg?branch=master&event=push)](https://github.com/Lightning-AI/lightning/actions/workflows/ci-pytorch-tests.yml) |
- _\*\* tests run on two NVIDIA P100_
- _\*\*\* tests run on Google GKE TPUv2/3. TPU py3.7 means we support Colab and Kaggle env._
diff --git a/dockers/base-cuda/Dockerfile b/dockers/base-cuda/Dockerfile
index 9a2e0455ff40f..5f8c12b8fa29d 100644
--- a/dockers/base-cuda/Dockerfile
+++ b/dockers/base-cuda/Dockerfile
@@ -140,23 +140,16 @@ RUN \
RUN \
# install ColossalAI
- SHOULD_INSTALL_COLOSSAL=$(python -c "import torch; print(1 if int(torch.__version__.split('.')[1]) > 9 else 0)") && \
- if [[ "$SHOULD_INSTALL_COLOSSAL" = "1" ]]; then \
- PYTORCH_VERSION_COLOSSALAI=$(python -c "import torch; print(torch.__version__.split('+')[0][:4])") ; \
- CUDA_VERSION_MM_COLOSSALAI=$(python -c "import torch ; print(''.join(map(str, torch.version.cuda)))") ; \
- CUDA_VERSION_COLOSSALAI=$(python -c "print([ver for ver in [11.3, 11.1] if $CUDA_VERSION_MM_COLOSSALAI >= ver][0])") ; \
- pip install "colossalai==0.1.10+torch${PYTORCH_VERSION_COLOSSALAI}cu${CUDA_VERSION_COLOSSALAI}" --find-links https://release.colossalai.org ; \
- python -c "import colossalai; print(colossalai.__version__)" ; \
- fi
+ PYTORCH_VERSION_COLOSSALAI=$(python -c "import torch; print(torch.__version__.split('+')[0][:4])") && \
+ CUDA_VERSION_MM_COLOSSALAI=$(python -c "import torch ; print(''.join(map(str, torch.version.cuda)))") && \
+ CUDA_VERSION_COLOSSALAI=$(python -c "print([ver for ver in [11.3, 11.1] if $CUDA_VERSION_MM_COLOSSALAI >= ver][0])") && \
+ pip install "colossalai==0.1.10+torch${PYTORCH_VERSION_COLOSSALAI}cu${CUDA_VERSION_COLOSSALAI}" --find-links https://release.colossalai.org && \
+ python -c "import colossalai; print(colossalai.__version__)"
RUN \
# install rest of strategies
# remove colossalai from requirements since they are installed separately
- SHOULD_INSTALL_COLOSSAL=$(python -c "import torch; print(1 if int(torch.__version__.split('.')[1]) > 9 else 0)") && \
- if [[ "$SHOULD_INSTALL_COLOSSAL" = "0" ]]; then \
- python -c "fname = 'requirements/pytorch/strategies.txt' ; lines = [line for line in open(fname).readlines() if 'colossalai' not in line] ; open(fname, 'w').writelines(lines)" ; \
- fi && \
- echo "$SHOULD_INSTALL_COLOSSAL" && \
+ python -c "fname = 'requirements/pytorch/strategies.txt' ; lines = [line for line in open(fname).readlines() if 'colossalai' not in line] ; open(fname, 'w').writelines(lines)" && \
cat requirements/pytorch/strategies.txt && \
pip install -r requirements/pytorch/devel.txt -r requirements/pytorch/strategies.txt --no-cache-dir --find-links https://download.pytorch.org/whl/cu${CUDA_VERSION_MM}/torch_stable.html
diff --git a/dockers/base-ipu/Dockerfile b/dockers/base-ipu/Dockerfile
index 3132766cfb6e1..bde6cdf24cd3c 100644
--- a/dockers/base-ipu/Dockerfile
+++ b/dockers/base-ipu/Dockerfile
@@ -18,7 +18,7 @@ LABEL maintainer="Lightning-AI "
ARG PYTHON_VERSION=3.9
ARG CONDA_VERSION=4.9.2
-ARG PYTORCH_VERSION=1.9
+ARG PYTORCH_VERSION=1.10
SHELL ["/bin/bash", "-c"]
diff --git a/dockers/ci-runner-ipu/Dockerfile b/dockers/ci-runner-ipu/Dockerfile
index eed18b1596c30..1e773425ae426 100644
--- a/dockers/ci-runner-ipu/Dockerfile
+++ b/dockers/ci-runner-ipu/Dockerfile
@@ -13,7 +13,7 @@
# limitations under the License.
ARG PYTHON_VERSION=3.9
-ARG PYTORCH_VERSION=1.9
+ARG PYTORCH_VERSION=1.10
FROM pytorchlightning/pytorch_lightning:base-ipu-py${PYTHON_VERSION}-torch${PYTORCH_VERSION}
diff --git a/dockers/release/Dockerfile b/dockers/release/Dockerfile
index ef405a79883d3..2ab2f88fe552c 100644
--- a/dockers/release/Dockerfile
+++ b/dockers/release/Dockerfile
@@ -39,10 +39,6 @@ RUN \
fi && \
# otherwise there is collision with folder name ans pkg name on Pypi
cd lightning && \
- SHOULD_INSTALL_COLOSSAL=$(python -c "import torch; print(1 if int(torch.__version__.split('.')[1]) > 9 else 0)") && \
- if [[ "$SHOULD_INSTALL_COLOSSAL" = "0" ]]; then \
- python -c "fname = 'requirements/pytorch/strategies.txt' ; lines = [line for line in open(fname).readlines() if 'colossalai' not in line] ; open(fname, 'w').writelines(lines)" ; \
- fi && \
pip install .["extra","loggers","strategies"] --no-cache-dir --find-links https://release.colossalai.org && \
cd .. && \
rm -rf lightning
diff --git a/docs/source-pytorch/accelerators/tpu_basic.rst b/docs/source-pytorch/accelerators/tpu_basic.rst
index a4b13bde1900d..3f27cf500b927 100644
--- a/docs/source-pytorch/accelerators/tpu_basic.rst
+++ b/docs/source-pytorch/accelerators/tpu_basic.rst
@@ -88,7 +88,7 @@ To get a TPU on colab, follow these steps:
.. code-block::
- !pip install cloud-tpu-client==0.10 https://storage.googleapis.com/tpu-pytorch/wheels/torch_xla-1.9-cp37-cp37m-linux_x86_64.whl
+ !pip install cloud-tpu-client https://storage.googleapis.com/tpu-pytorch/wheels/torch_xla-1.12-cp39-cp39-linux_x86_64.whl
5. Once the above is done, install PyTorch Lightning.
diff --git a/docs/source-pytorch/advanced/model_parallel.rst b/docs/source-pytorch/advanced/model_parallel.rst
index ca6d2be30faa5..a9922f4274154 100644
--- a/docs/source-pytorch/advanced/model_parallel.rst
+++ b/docs/source-pytorch/advanced/model_parallel.rst
@@ -1080,9 +1080,6 @@ Enable `PowerSGD for multi-node throughput improvement =1.9.0
-
.. code-block:: python
from pytorch_lightning import Trainer
diff --git a/docs/source-pytorch/versioning.rst b/docs/source-pytorch/versioning.rst
index d9ba06d58e453..2695be045a267 100644
--- a/docs/source-pytorch/versioning.rst
+++ b/docs/source-pytorch/versioning.rst
diff --git a/environment.yml b/environment.yml
--- a/environment.yml
+++ b/environment.yml
@@ -31,7 +31,7 @@ dependencies:
- python>=3.7
- pip>20.1
- numpy>=1.17.2
- - pytorch>=1.9.*
+ - pytorch>=1.10.*
- future>=0.17.1
- PyYAML>=5.1
- tqdm>=4.41.0
@@ -43,7 +43,7 @@ dependencies:
- omegaconf>=2.0.5
# Examples
- - torchvision>=0.10.*
+ - torchvision>=0.11.*
- pip:
- mlflow>=1.0.0
diff --git a/requirements/lite/base.txt b/requirements/lite/base.txt
index b342ecacc0927..676837f04bda1 100644
--- a/requirements/lite/base.txt
+++ b/requirements/lite/base.txt
@@ -2,7 +2,7 @@
# in case you want to preserve/enforce restrictions on the latest compatible version, add "strict" as an in-line comment
numpy>=1.17.2, <1.23.1
-torch>=1.9.*, <1.13.0
+torch>=1.10.0, <1.13.0
fsspec[http]>2021.06.0, <2022.6.0
packaging>=17.0, <=21.3
typing-extensions>=4.0.0, <=4.4.0
diff --git a/requirements/pytorch/adjust-versions.py b/requirements/pytorch/adjust-versions.py
index 9d9f4047e6fc4..59869df16ac31 100644
--- a/requirements/pytorch/adjust-versions.py
+++ b/requirements/pytorch/adjust-versions.py
@@ -12,8 +12,6 @@
dict(torch="1.10.2", torchvision="0.11.3"),
dict(torch="1.10.1", torchvision="0.11.2"),
dict(torch="1.10.0", torchvision="0.11.1"),
- dict(torch="1.9.1", torchvision="0.10.1"),
- dict(torch="1.9.0", torchvision="0.10.0"),
]
@@ -62,16 +60,16 @@ def test_check():
torchmetrics>=0.4.1
"""
expected = """
- torch==1.9.1
- torch==1.9.1
- torch==1.9.1
- torch==1.9.1
+ torch==1.12.1
+ torch==1.12.1
+ torch==1.12.1
+ torch==1.12.1
future>=0.17.1
pytorch==1.5.6+123dev0
- torchvision==0.10.1
+ torchvision==0.13.1
torchmetrics>=0.4.1
""".strip()
- actual = main(requirements, "1.9")
+ actual = main(requirements, "1.12")
assert actual == expected, (actual, expected)
diff --git a/requirements/pytorch/base.txt b/requirements/pytorch/base.txt
index e3eae1cd66ce8..5587b8db1963d 100644
--- a/requirements/pytorch/base.txt
+++ b/requirements/pytorch/base.txt
@@ -2,7 +2,7 @@
# in case you want to preserve/enforce restrictions on the latest compatible version, add "strict" as an in-line comment
numpy>=1.17.2, <1.23.1
-torch>=1.9.*, <1.13.0
+torch>=1.10.0, <1.13.0
tqdm>=4.57.0, <4.65.0
PyYAML>=5.4, <=6.0
fsspec[http]>2021.06.0, <2022.8.0
diff --git a/requirements/pytorch/examples.txt b/requirements/pytorch/examples.txt
index 9a6153d0f9110..c749c83faedb9 100644
--- a/requirements/pytorch/examples.txt
+++ b/requirements/pytorch/examples.txt
@@ -1,6 +1,6 @@
# NOTE: the upper bound for the package version is only set for CI stability, and it is dropped while installing this package
# in case you want to preserve/enforce restrictions on the latest compatible version, add "strict" as an in-line comment
-torchvision>=0.10.*, <=0.13.0
+torchvision>=0.11.0, <=0.14.0
gym[classic_control]>=0.17.0, <0.26.3
ipython[all] <8.6.1
diff --git a/src/lightning_lite/plugins/environments/torchelastic.py b/src/lightning_lite/plugins/environments/torchelastic.py
index f33c7dab0fea7..93cf169e25413 100644
--- a/src/lightning_lite/plugins/environments/torchelastic.py
+++ b/src/lightning_lite/plugins/environments/torchelastic.py
@@ -18,7 +18,6 @@
import torch.distributed
from lightning_lite.plugins.environments.cluster_environment import ClusterEnvironment
-from lightning_lite.utilities.imports import _TORCH_GREATER_EQUAL_1_9_1
from lightning_lite.utilities.rank_zero import rank_zero_warn
log = logging.getLogger(__name__)
@@ -51,11 +50,8 @@ def main_port(self) -> int:
@staticmethod
def detect() -> bool:
"""Returns ``True`` if the current process was launched using the torchelastic command."""
- if _TORCH_GREATER_EQUAL_1_9_1:
- # if not available (for example on MacOS), `is_torchelastic_launched` is not defined
- return torch.distributed.is_available() and torch.distributed.is_torchelastic_launched()
- required_env_vars = {"RANK", "GROUP_RANK", "LOCAL_RANK", "LOCAL_WORLD_SIZE"}
- return required_env_vars.issubset(os.environ.keys())
+ # if not available (for example on MacOS), `is_torchelastic_launched` is not defined
+ return torch.distributed.is_available() and torch.distributed.is_torchelastic_launched()
def world_size(self) -> int:
return int(os.environ["WORLD_SIZE"])
diff --git a/src/lightning_lite/utilities/imports.py b/src/lightning_lite/utilities/imports.py
index 8bf085cb10be9..83f3b76b1e9a4 100644
--- a/src/lightning_lite/utilities/imports.py
+++ b/src/lightning_lite/utilities/imports.py
@@ -27,7 +27,6 @@
_PYTHON_GREATER_EQUAL_3_8_0 = (sys.version_info.major, sys.version_info.minor) >= (3, 8)
_PYTHON_GREATER_EQUAL_3_10_0 = (sys.version_info.major, sys.version_info.minor) >= (3, 10)
-_TORCH_GREATER_EQUAL_1_9_1 = compare_version("torch", operator.ge, "1.9.1")
_TORCH_GREATER_EQUAL_1_10 = compare_version("torch", operator.ge, "1.10.0")
_TORCH_LESSER_EQUAL_1_10_2 = compare_version("torch", operator.le, "1.10.2")
_TORCH_GREATER_EQUAL_1_11 = compare_version("torch", operator.ge, "1.11.0")
diff --git a/src/pytorch_lightning/CHANGELOG.md b/src/pytorch_lightning/CHANGELOG.md
index 2a9d654c99b72..433dc033d6653 100644
--- a/src/pytorch_lightning/CHANGELOG.md
+++ b/src/pytorch_lightning/CHANGELOG.md
@@ -27,6 +27,8 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
### Changed
+- Drop PyTorch 1.9 support ([#15347](https://github.com/Lightning-AI/lightning/pull/15347))
+
- From now on, Lightning Trainer and `LightningModule.load_from_checkpoint` automatically upgrade the loaded checkpoint if it was produced in an old version of Lightning ([#15237](https://github.com/Lightning-AI/lightning/pull/15237))
- `Trainer.{validate,test,predict}(ckpt_path=...)` no longer restores the `Trainer.global_step` and `trainer.current_epoch` value from the checkpoints - From now on, only `Trainer.fit` will restore this value ([#15532](https://github.com/Lightning-AI/lightning/pull/15532))
@@ -164,6 +166,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
- Deprecated custom `pl.utilities.distributed.AllGatherGrad` implementation in favor of PyTorch's ([#15364](https://github.com/Lightnign-AI/lightning/pull/15364))
+
### Removed
- Removed the deprecated `Trainer.training_type_plugin` property in favor of `Trainer.strategy` ([#14011](https://github.com/Lightning-AI/lightning/pull/14011))
diff --git a/src/pytorch_lightning/README.md b/src/pytorch_lightning/README.md
index 54c3db39c4973..cd5698821b253 100644
--- a/src/pytorch_lightning/README.md
+++ b/src/pytorch_lightning/README.md
@@ -78,15 +78,15 @@ Lightning is rigorously tested across multiple CPUs, GPUs, TPUs, IPUs, and HPUs
-| System / PyTorch ver. | 1.9 | 1.10 | 1.12 (latest) |
-| :------------------------: | :-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: |
-| Linux py3.7 \[GPUs\*\*\] | - | - | - |
-| Linux py3.7 \[TPUs\*\*\*\] | [![Test](https://github.com/Lightning-AI/lightning/actions/workflows/tpu-tests.yml/badge.svg?branch=master&event=push)](https://github.com/Lightning-AI/lightning/actions/workflows/tpu-tests.yml) | - | - |
-| Linux py3.8 \[IPUs\] | [![Build Status]()](https://dev.azure.com/Lightning-AI/lightning/_build/latest?definitionId=25&branchName=master) | - | - |
-| Linux py3.8 \[HPUs\] | - | [![Build Status]()](https://dev.azure.com/Lightning-AI/lightning/_build/latest?definitionId=26&branchName=master) | - |
-| Linux py3.{7,9} | - | - | [![Test](https://github.com/Lightning-AI/lightning/actions/workflows/ci-pytorch-tests.yml/badge.svg?branch=master&event=push)](https://github.com/Lightning-AI/lightning/actions/workflows/ci-pytorch-tests.yml) |
-| OSX py3.{7,9} | - | - | [![Test](https://github.com/Lightning-AI/lightning/actions/workflows/ci-pytorch-tests.yml/badge.svg?branch=master&event=push)](https://github.com/Lightning-AI/lightning/actions/workflows/ci-pytorch-tests.yml) |
-| Windows py3.{7,9} | - | - | [![Test](https://github.com/Lightning-AI/lightning/actions/workflows/ci-pytorch-tests.yml/badge.svg?branch=master&event=push)](https://github.com/Lightning-AI/lightning/actions/workflows/ci-pytorch-tests.yml) |
+| System / PyTorch ver. | 1.10 | 1.12 |
+| :------------------------: | :-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: |
+| Linux py3.7 \[GPUs\*\*\] | - | - |
+| Linux py3.7 \[TPUs\*\*\*\] | - | - |
+| Linux py3.8 \[IPUs\] | - | - |
+| Linux py3.8 \[HPUs\] | [![Build Status]()](https://dev.azure.com/Lightning-AI/lightning/_build/latest?definitionId=26&branchName=master) | - |
+| Linux py3.{7,9} | - | [![Test](https://github.com/Lightning-AI/lightning/actions/workflows/ci-pytorch-tests.yml/badge.svg?branch=master&event=push)](https://github.com/Lightning-AI/lightning/actions/workflows/ci-pytorch-tests.yml) |
+| OSX py3.{7,9} | - | [![Test](https://github.com/Lightning-AI/lightning/actions/workflows/ci-pytorch-tests.yml/badge.svg?branch=master&event=push)](https://github.com/Lightning-AI/lightning/actions/workflows/ci-pytorch-tests.yml) |
+| Windows py3.{7,9} | - | [![Test](https://github.com/Lightning-AI/lightning/actions/workflows/ci-pytorch-tests.yml/badge.svg?branch=master&event=push)](https://github.com/Lightning-AI/lightning/actions/workflows/ci-pytorch-tests.yml) |
- _\*\* tests run on two NVIDIA P100_
- _\*\*\* tests run on Google GKE TPUv2/3. TPU py3.7 means we support Colab and Kaggle env._
diff --git a/src/pytorch_lightning/utilities/imports.py b/src/pytorch_lightning/utilities/imports.py
index 4e3cea186f66c..803b335fa2f2d 100644
--- a/src/pytorch_lightning/utilities/imports.py
+++ b/src/pytorch_lightning/utilities/imports.py
@@ -22,12 +22,12 @@
_IS_WINDOWS = platform.system() == "Windows"
_PYTHON_GREATER_EQUAL_3_8_0 = (sys.version_info.major, sys.version_info.minor) >= (3, 8)
_PYTHON_GREATER_EQUAL_3_10_0 = (sys.version_info.major, sys.version_info.minor) >= (3, 10)
-_TORCH_GREATER_EQUAL_1_9_1 = compare_version("torch", operator.ge, "1.9.1")
_TORCH_GREATER_EQUAL_1_10 = compare_version("torch", operator.ge, "1.10.0")
_TORCH_LESSER_EQUAL_1_10_2 = compare_version("torch", operator.le, "1.10.2")
_TORCH_GREATER_EQUAL_1_11 = compare_version("torch", operator.ge, "1.11.0")
_TORCH_GREATER_EQUAL_1_12 = compare_version("torch", operator.ge, "1.12.0")
-_TORCH_GREATER_EQUAL_1_13 = compare_version("torch", operator.ge, "1.13.0", use_base_version=True)
+_TORCH_GREATER_EQUAL_1_13 = compare_version("torch", operator.ge, "1.13.0")
+_TORCH_GREATER_EQUAL_1_14 = compare_version("torch", operator.ge, "1.14.0", use_base_version=True)
_APEX_AVAILABLE = module_available("apex.amp")
_DALI_AVAILABLE = module_available("nvidia.dali")
diff --git a/tests/README.md b/tests/README.md
index 64ecd3a7a0775..723a47d4a483a 100644
--- a/tests/README.md
+++ b/tests/README.md
@@ -86,20 +86,3 @@ coverage report -m
# exporting results
coverage xml
```
-
-## Building test image
-
-You can build it on your own, note it takes lots of time, be prepared.
-
-```bash
-git clone
-docker image build -t pytorch_lightning:devel-torch1.9 -f dockers/cuda-extras/Dockerfile --build-arg TORCH_VERSION=1.9 .
-```
-
-To build other versions, select different Dockerfile.
-
-```bash
-docker image list
-docker run --rm -it pytorch_lightning:devel-torch1.9 bash
-docker image rm pytorch_lightning:devel-torch1.9
-```
diff --git a/tests/tests_lite/plugins/collectives/test_torch_collective.py b/tests/tests_lite/plugins/collectives/test_torch_collective.py
index 3b2235b12287f..97f433f40eeed 100644
--- a/tests/tests_lite/plugins/collectives/test_torch_collective.py
+++ b/tests/tests_lite/plugins/collectives/test_torch_collective.py
@@ -66,17 +66,10 @@ def check_destroy_group():
),
("barrier", {"device_ids": [0]}, None),
("all_gather_object", {"object_list": [PASSED_OBJECT], "obj": PASSED_OBJECT}, "object_list"),
- pytest.param(
- "broadcast_object_list",
- {"object_list": [PASSED_OBJECT], "src": 0},
- "object_list",
- marks=RunIf(max_torch="1.10"),
- ),
- pytest.param(
+ (
"broadcast_object_list",
{"object_list": [PASSED_OBJECT], "src": 0, "device": torch.device("cpu")},
"object_list",
- marks=RunIf(min_torch="1.10"),
),
(
"gather_object",
diff --git a/tests/tests_lite/plugins/environments/test_torchelastic.py b/tests/tests_lite/plugins/environments/test_torchelastic.py
index 9a28784d2cbde..45d74632a7ccc 100644
--- a/tests/tests_lite/plugins/environments/test_torchelastic.py
+++ b/tests/tests_lite/plugins/environments/test_torchelastic.py
@@ -16,7 +16,6 @@
from unittest import mock
import pytest
-from tests_lite.helpers.runif import RunIf
from lightning_lite.plugins.environments import TorchElasticEnvironment
@@ -71,27 +70,8 @@ def test_attributes_from_environment_variables(caplog):
assert "setting world size is not allowed" in caplog.text
-@RunIf(max_torch="1.9.0")
-def test_detect_before_1_9_1():
- """Test the detection of a torchelastic environment configuration before 1.9.1."""
- with mock.patch.dict(os.environ, {}, clear=True):
- assert not TorchElasticEnvironment.detect()
-
- with mock.patch.dict(
- os.environ,
- {
- "RANK": "",
- "GROUP_RANK": "",
- "LOCAL_RANK": "",
- "LOCAL_WORLD_SIZE": "",
- },
- ):
- assert TorchElasticEnvironment.detect()
-
-
-@RunIf(min_torch="1.9.1")
-def test_detect_after_1_9_1():
- """Test the detection of a torchelastic environment configuration after 1.9.1."""
+def test_detect():
+ """Test the detection of a torchelastic environment configuration."""
with mock.patch.dict(os.environ, {}, clear=True):
assert not TorchElasticEnvironment.detect()
diff --git a/tests/tests_lite/plugins/precision/test_native_amp.py b/tests/tests_lite/plugins/precision/test_native_amp.py
index dbf2e1c9ec5c0..e766908910d3a 100644
--- a/tests/tests_lite/plugins/precision/test_native_amp.py
+++ b/tests/tests_lite/plugins/precision/test_native_amp.py
@@ -16,7 +16,6 @@
import pytest
import torch
-from tests_lite.helpers.runif import RunIf
from lightning_lite.plugins.precision.native_amp import NativeMixedPrecision
@@ -41,7 +40,6 @@ def test_native_amp_precision_bf16_min_torch():
NativeMixedPrecision(precision="bf16", device=Mock())
-@RunIf(min_torch="1.10")
def test_native_amp_precision_forward_context():
"""Test to ensure that the context manager correctly is set to CPU + bfloat16."""
precision = NativeMixedPrecision(precision=16, device="cuda")
@@ -84,7 +82,6 @@ def test_native_amp_precision_optimizer_step_with_scaler():
precision.scaler.update.assert_called_once()
-@RunIf(min_torch="1.10")
def test_native_amp_precision_optimizer_step_without_scaler():
precision = NativeMixedPrecision(precision="bf16", device="cuda")
assert precision.scaler is None
diff --git a/tests/tests_lite/plugins/precision/test_native_amp_integration.py b/tests/tests_lite/plugins/precision/test_native_amp_integration.py
index cd927229cd8f8..7da581653ece6 100644
--- a/tests/tests_lite/plugins/precision/test_native_amp_integration.py
+++ b/tests/tests_lite/plugins/precision/test_native_amp_integration.py
@@ -58,7 +58,6 @@ def after_backward(self, model):
assert model.layer.weight.grad.dtype == torch.float32
-@RunIf(min_torch="1.10")
@pytest.mark.parametrize(
"accelerator, precision, expected_dtype",
[
diff --git a/tests/tests_lite/test_parity.py b/tests/tests_lite/test_parity.py
index 03f5f9e61a441..c5687ee58a120 100644
--- a/tests/tests_lite/test_parity.py
+++ b/tests/tests_lite/test_parity.py
@@ -112,7 +112,7 @@ def precision_context(precision, accelerator) -> Generator[None, None, None]:
pytest.param(32, None, 1, "cpu"),
pytest.param(32, None, 1, "gpu", marks=RunIf(min_cuda_gpus=1)),
pytest.param(16, None, 1, "gpu", marks=RunIf(min_cuda_gpus=1)),
- pytest.param("bf16", None, 1, "gpu", marks=RunIf(min_cuda_gpus=1, min_torch="1.10", bf16_cuda=True)),
+ pytest.param("bf16", None, 1, "gpu", marks=RunIf(min_cuda_gpus=1, bf16_cuda=True)),
pytest.param(32, None, 1, "mps", marks=RunIf(mps=True)),
],
)
diff --git a/tests/tests_lite/test_wrappers.py b/tests/tests_lite/test_wrappers.py
index 8b76adc476e83..3e529b63425b4 100644
--- a/tests/tests_lite/test_wrappers.py
+++ b/tests/tests_lite/test_wrappers.py
@@ -112,7 +112,7 @@ def __init__(self):
torch.bfloat16,
"gpu",
"cuda:0",
- marks=RunIf(min_cuda_gpus=1, min_torch="1.10", bf16_cuda=True),
+ marks=RunIf(min_cuda_gpus=1, bf16_cuda=True),
),
pytest.param(
"bf16",
@@ -120,7 +120,7 @@ def __init__(self):
torch.bfloat16,
"gpu",
"cuda:0",
- marks=RunIf(min_cuda_gpus=1, min_torch="1.10", bf16_cuda=True),
+ marks=RunIf(min_cuda_gpus=1, bf16_cuda=True),
),
pytest.param(
"bf16",
@@ -128,7 +128,7 @@ def __init__(self):
torch.bool,
"gpu",
"cuda:0",
- marks=RunIf(min_cuda_gpus=1, min_torch="1.10", bf16_cuda=True),
+ marks=RunIf(min_cuda_gpus=1, bf16_cuda=True),
),
pytest.param(32, torch.float32, torch.float32, "mps", "mps:0", marks=RunIf(mps=True)),
],
diff --git a/tests/tests_lite/utilities/test_distributed.py b/tests/tests_lite/utilities/test_distributed.py
index e426d6394db09..cbd53a83edcd9 100644
--- a/tests/tests_lite/utilities/test_distributed.py
+++ b/tests/tests_lite/utilities/test_distributed.py
@@ -59,7 +59,7 @@ def _test_all_gather_uneven_tensors_multidim(strategy):
assert (val == torch.ones_like(val)).all()
-@RunIf(min_torch="1.10", skip_windows=True)
+@RunIf(skip_windows=True)
@pytest.mark.parametrize(
"process",
[
diff --git a/tests/tests_pytorch/accelerators/test_ipu.py b/tests/tests_pytorch/accelerators/test_ipu.py
index de60aa3a03211..5c0049cdf9085 100644
--- a/tests/tests_pytorch/accelerators/test_ipu.py
+++ b/tests/tests_pytorch/accelerators/test_ipu.py
@@ -128,6 +128,7 @@ def test_no_warning_strategy(tmpdir):
@RunIf(ipu=True)
+@pytest.mark.xfail(raises=NotImplementedError, reason="TODO: issues with latest poptorch")
@pytest.mark.parametrize("devices", [1, 4])
def test_all_stages(tmpdir, devices):
model = IPUModel()
@@ -139,6 +140,7 @@ def test_all_stages(tmpdir, devices):
@RunIf(ipu=True)
+@pytest.mark.xfail(raises=NotImplementedError, reason="TODO: issues with latest poptorch")
@pytest.mark.parametrize("devices", [1, 4])
def test_inference_only(tmpdir, devices):
model = IPUModel()
@@ -150,6 +152,7 @@ def test_inference_only(tmpdir, devices):
@RunIf(ipu=True, sklearn=True)
+@pytest.mark.xfail(reason="TODO: issues with latest poptorch")
@mock.patch.dict(os.environ, os.environ.copy(), clear=True)
def test_optimization(tmpdir):
seed_everything(42)
@@ -288,6 +291,7 @@ def on_train_start(self, trainer: Trainer, pl_module: LightningModule) -> None:
@RunIf(ipu=True)
+@pytest.mark.xfail(raises=NotImplementedError, reason="TODO: issues with latest poptorch")
def test_stages_correct(tmpdir):
"""Ensure all stages correctly are traced correctly by asserting the output for each stage."""
diff --git a/tests/tests_pytorch/core/test_lightning_module.py b/tests/tests_pytorch/core/test_lightning_module.py
index 2c0757d1cb82d..1c9653faf07b7 100644
--- a/tests/tests_pytorch/core/test_lightning_module.py
+++ b/tests/tests_pytorch/core/test_lightning_module.py
@@ -310,7 +310,7 @@ def assert_device(device: torch.device) -> None:
assert_device(torch.device("cpu"))
-@RunIf(min_torch="1.10", skip_windows=True)
+@RunIf(skip_windows=True)
def test_sharded_tensor_state_dict(single_process_pg):
if _TORCH_GREATER_EQUAL_1_11:
from torch.distributed._shard.sharded_tensor import empty as sharded_tensor_empty
diff --git a/tests/tests_pytorch/models/test_amp.py b/tests/tests_pytorch/models/test_amp.py
index 22b0056334e61..f769a904b7a31 100644
--- a/tests/tests_pytorch/models/test_amp.py
+++ b/tests/tests_pytorch/models/test_amp.py
@@ -67,7 +67,6 @@ def _assert_autocast_enabled(self):
assert torch.is_autocast_enabled()
-@RunIf(min_torch="1.10")
@pytest.mark.flaky(reruns=3)
@pytest.mark.parametrize(
("strategy", "precision", "devices"),
@@ -102,7 +101,7 @@ def test_amp_cpus(tmpdir, strategy, precision, devices):
trainer.predict(model)
-@RunIf(min_cuda_gpus=2, min_torch="1.10")
+@RunIf(min_cuda_gpus=2)
@pytest.mark.parametrize("strategy", [None, "dp", "ddp_spawn"])
@pytest.mark.parametrize("precision", [16, pytest.param("bf16", marks=RunIf(bf16_cuda=True))])
@pytest.mark.parametrize("devices", [1, 2])
@@ -229,7 +228,6 @@ def test_amp_with_apex_reload(tmpdir):
trainer.test(model, ckpt_path="best")
-@RunIf(min_torch="1.10")
@pytest.mark.parametrize("clip_val", [0, 10])
@mock.patch("torch.nn.utils.clip_grad_norm_")
def test_precision_16_clip_gradients(mock_clip_grad_norm, clip_val, tmpdir):
diff --git a/tests/tests_pytorch/models/test_onnx.py b/tests/tests_pytorch/models/test_onnx.py
index 0256577eeba2f..d3d550da6859e 100644
--- a/tests/tests_pytorch/models/test_onnx.py
+++ b/tests/tests_pytorch/models/test_onnx.py
@@ -56,21 +56,6 @@ def test_model_saves_on_gpu(tmpdir, accelerator):
assert os.path.getsize(file_path) > 4e2
-@RunIf(max_torch="1.10")
-def test_model_saves_with_example_output(tmpdir):
- """Test that ONNX model saves when provided with example output."""
- model = BoringModel()
- trainer = Trainer(fast_dev_run=True)
- trainer.fit(model)
-
- file_path = os.path.join(tmpdir, "model.onnx")
- input_sample = torch.randn((1, 32))
- model.eval()
- example_outputs = model.forward(input_sample)
- model.to_onnx(file_path, input_sample, example_outputs=example_outputs)
- assert os.path.exists(file_path) is True
-
-
@pytest.mark.parametrize(
["modelclass", "input_sample"],
[
diff --git a/tests/tests_pytorch/plugins/precision/test_sharded_precision.py b/tests/tests_pytorch/plugins/precision/test_sharded_precision.py
index b231455c6cf6f..017717949e48f 100644
--- a/tests/tests_pytorch/plugins/precision/test_sharded_precision.py
+++ b/tests/tests_pytorch/plugins/precision/test_sharded_precision.py
@@ -30,7 +30,7 @@
[
(16, torch.cuda.amp.GradScaler(), torch.cuda.amp.GradScaler),
(16, None, ShardedGradScaler),
- pytest.param("bf16", None, None, marks=RunIf(min_torch="1.10")),
+ ("bf16", None, None),
(32, None, None),
],
)
diff --git a/tests/tests_pytorch/plugins/test_amp_plugins.py b/tests/tests_pytorch/plugins/test_amp_plugins.py
index ac75986b9e15c..acc7286d28966 100644
--- a/tests/tests_pytorch/plugins/test_amp_plugins.py
+++ b/tests/tests_pytorch/plugins/test_amp_plugins.py
@@ -241,7 +241,6 @@ def test_amp_apex_ddp_spawn_fit(amp_level, tmpdir):
trainer.fit(model)
-@RunIf(min_torch="1.10")
def test_cpu_amp_precision_context_manager(tmpdir):
"""Test to ensure that the context manager correctly is set to CPU + bfloat16."""
plugin = NativeMixedPrecisionPlugin("bf16", "cpu")
diff --git a/tests/tests_pytorch/strategies/test_ddp_strategy.py b/tests/tests_pytorch/strategies/test_ddp_strategy.py
index f8b3ddfed3a3a..58768b2b6ce1e 100644
--- a/tests/tests_pytorch/strategies/test_ddp_strategy.py
+++ b/tests/tests_pytorch/strategies/test_ddp_strategy.py
@@ -290,7 +290,7 @@ def configure_optimizers(self):
return ZeroRedundancyOptimizer(self.layer.parameters(), optimizer_class=torch.optim.Adam, lr=0.1)
-@RunIf(min_cuda_gpus=2, skip_windows=True, min_torch="1.10")
+@RunIf(min_cuda_gpus=2, skip_windows=True)
@pytest.mark.parametrize("strategy", (pytest.param("ddp", marks=RunIf(standalone=True)), "ddp_spawn"))
def test_ddp_strategy_checkpoint_zero_redundancy_optimizer(tmpdir, strategy):
"""Test to ensure that checkpoint is saved correctly when using zero redundancy optimizer."""
diff --git a/tests/tests_pytorch/strategies/test_ddp_strategy_with_comm_hook.py b/tests/tests_pytorch/strategies/test_ddp_strategy_with_comm_hook.py
index 1ee98d9e709a8..f89a1d1a6ce8d 100644
--- a/tests/tests_pytorch/strategies/test_ddp_strategy_with_comm_hook.py
+++ b/tests/tests_pytorch/strategies/test_ddp_strategy_with_comm_hook.py
@@ -134,7 +134,7 @@ def test_ddp_spawn_fp16_compress_comm_hook(tmpdir):
assert trainer.state.finished, f"Training failed with {trainer.state}"
-@RunIf(min_cuda_gpus=2, min_torch="1.10.0", skip_windows=True, standalone=True)
+@RunIf(min_cuda_gpus=2, skip_windows=True, standalone=True)
def test_ddp_post_local_sgd_comm_hook(tmpdir):
"""Test for DDP post-localSGD hook."""
model = BoringModel()
@@ -162,7 +162,7 @@ def test_ddp_post_local_sgd_comm_hook(tmpdir):
assert trainer.state.finished, f"Training failed with {trainer.state}"
-@RunIf(skip_windows=True, min_torch="1.10.0", min_cuda_gpus=2, standalone=True)
+@RunIf(skip_windows=True, min_cuda_gpus=2, standalone=True)
@mock.patch("torch.distributed.algorithms.model_averaging.averagers.PeriodicModelAverager.average_parameters")
def test_post_local_sgd_model_averaging(average_parameters_mock, tmpdir):
"""Test that when using DDP with post-localSGD, model averaging is called."""
@@ -207,7 +207,7 @@ def test_post_local_sgd_model_averaging(average_parameters_mock, tmpdir):
average_parameters_mock.assert_called()
-@RunIf(skip_windows=True, min_torch="1.10.0", min_cuda_gpus=2, standalone=True)
+@RunIf(skip_windows=True, min_cuda_gpus=2, standalone=True)
@mock.patch("torch.distributed.algorithms.model_averaging.averagers.PeriodicModelAverager.average_parameters")
def test_post_local_sgd_model_averaging_raises(average_parameters_mock, tmpdir):
"""Test that when using DDP with post-localSGD a ValueError is thrown when the optimizer is
diff --git a/tests/tests_pytorch/utilities/test_distributed.py b/tests/tests_pytorch/utilities/test_distributed.py
index b39dbf7010b6a..2aac01a4c1b8b 100644
--- a/tests/tests_pytorch/utilities/test_distributed.py
+++ b/tests/tests_pytorch/utilities/test_distributed.py
@@ -27,7 +27,7 @@ def collect_states_fn(strategy):
assert collected_state == {1: {"something": torch.tensor([1])}, 0: {"something": torch.tensor([0])}}
-@RunIf(min_cuda_gpus=2, min_torch="1.10", skip_windows=True)
+@RunIf(min_cuda_gpus=2, skip_windows=True)
def test_collect_states():
"""This test ensures state are properly collected across processes.