From d8583f6776d44262d48c7bef05cfe0d53220b2ab Mon Sep 17 00:00:00 2001 From: Nicolas Hug Date: Fri, 18 Aug 2023 12:00:33 +0100 Subject: [PATCH] Add links to Colab --- docs/source/conf.py | 20 +++++++++++++++++++ gallery/others/plot_optical_flow.py | 4 ++++ .../others/plot_repurposing_annotations.py | 4 ++++ .../others/plot_scripted_tensor_transforms.py | 4 ++++ gallery/others/plot_transforms.py | 4 ++++ gallery/others/plot_video_api.py | 8 ++++++-- gallery/others/plot_visualization_utils.py | 4 ++++ .../v2_transforms/plot_custom_datapoints.py | 4 ++++ .../v2_transforms/plot_custom_transforms.py | 4 ++++ gallery/v2_transforms/plot_cutmix_mixup.py | 4 ++++ gallery/v2_transforms/plot_datapoints.py | 5 ++++- gallery/v2_transforms/plot_transforms_v2.py | 4 ++++ .../v2_transforms/plot_transforms_v2_e2e.py | 4 ++++ 13 files changed, 70 insertions(+), 3 deletions(-) diff --git a/docs/source/conf.py b/docs/source/conf.py index 4a331b6cd75..fc67c7a345d 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -59,6 +59,26 @@ "beta_status", ] +# We override sphinx-gallery's example header to prevent sphinx-gallery from +# creating a note at the top of the rendered notebook. +# https://github.com/sphinx-gallery/sphinx-gallery/blob/451ccba1007cc523f39cbcc960ebc21ca39f7b75/sphinx_gallery/gen_rst.py#L1267-L1271 +# This is because we also want to add a link to Google Colab, so we write our own note in each example. +from sphinx_gallery import gen_rst + +gen_rst.EXAMPLE_HEADER = """ +.. DO NOT EDIT. +.. THIS FILE WAS AUTOMATICALLY GENERATED BY SPHINX-GALLERY. +.. TO MAKE CHANGES, EDIT THE SOURCE PYTHON FILE: +.. "{0}" +.. LINE NUMBERS ARE GIVEN BELOW. + +.. rst-class:: sphx-glr-example-title + +.. 
_sphx_glr_{1}: + +""" + + sphinx_gallery_conf = { "examples_dirs": "../../gallery/", # path to your example scripts "gallery_dirs": "auto_examples", # path to where to save gallery generated output diff --git a/gallery/others/plot_optical_flow.py b/gallery/others/plot_optical_flow.py index 499f8c66398..bc734a6e0ec 100644 --- a/gallery/others/plot_optical_flow.py +++ b/gallery/others/plot_optical_flow.py @@ -3,6 +3,10 @@ Optical Flow: Predicting movement with the RAFT model ===================================================== +.. note:: + Try on `Colab <https://colab.research.google.com/github/pytorch/vision/blob/gh-pages/_generated_ipynb_notebooks/plot_optical_flow.ipynb>`_ + or :ref:`go to the end <sphx_glr_download_auto_examples_others_plot_optical_flow.py>` to download the full example code. + Optical flow is the task of predicting movement between two images, usually two consecutive frames of a video. Optical flow models take two images as input, and predict a flow: the flow indicates the displacement of every single pixel in the diff --git a/gallery/others/plot_repurposing_annotations.py b/gallery/others/plot_repurposing_annotations.py index f47c301812b..b1617cacd99 100644 --- a/gallery/others/plot_repurposing_annotations.py +++ b/gallery/others/plot_repurposing_annotations.py @@ -3,6 +3,10 @@ Repurposing masks into bounding boxes ===================================== +.. note:: + Try on `Colab <https://colab.research.google.com/github/pytorch/vision/blob/gh-pages/_generated_ipynb_notebooks/plot_repurposing_annotations.ipynb>`_ + or :ref:`go to the end <sphx_glr_download_auto_examples_others_plot_repurposing_annotations.py>` to download the full example code. + The following example illustrates the operations available the :ref:`torchvision.ops <ops>` module for repurposing segmentation masks into object localization annotations for different tasks diff --git a/gallery/others/plot_scripted_tensor_transforms.py b/gallery/others/plot_scripted_tensor_transforms.py index 5bf48d69f36..85b332c4ca1 100644 --- a/gallery/others/plot_scripted_tensor_transforms.py +++ b/gallery/others/plot_scripted_tensor_transforms.py @@ -3,6 +3,10 @@ Tensor transforms and JIT ========================= +.. note:: + Try on `Colab <https://colab.research.google.com/github/pytorch/vision/blob/gh-pages/_generated_ipynb_notebooks/plot_scripted_tensor_transforms.ipynb>`_ + or :ref:`go to the end <sphx_glr_download_auto_examples_others_plot_scripted_tensor_transforms.py>` to download the full example code. 
+ This example illustrates various features that are now supported by the :ref:`image transformations <transforms>` on Tensor images. In particular, we show how image transforms can be performed on GPU, and how one can also script diff --git a/gallery/others/plot_transforms.py b/gallery/others/plot_transforms.py index 2cb0e34693c..9702bc9c3ba 100644 --- a/gallery/others/plot_transforms.py +++ b/gallery/others/plot_transforms.py @@ -3,6 +3,10 @@ Illustration of transforms ========================== +.. note:: + Try on `Colab <https://colab.research.google.com/github/pytorch/vision/blob/gh-pages/_generated_ipynb_notebooks/plot_transforms.ipynb>`_ + or :ref:`go to the end <sphx_glr_download_auto_examples_others_plot_transforms.py>` to download the full example code. + This example illustrates the various transforms available in :ref:`the torchvision.transforms module <transforms>`. """ diff --git a/gallery/others/plot_video_api.py b/gallery/others/plot_video_api.py index aa3a620a613..ac9eb0ba27d 100644 --- a/gallery/others/plot_video_api.py +++ b/gallery/others/plot_video_api.py @@ -1,7 +1,11 @@ """ -======================= +========= Video API -======================= +========= + +.. note:: + Try on `Colab <https://colab.research.google.com/github/pytorch/vision/blob/gh-pages/_generated_ipynb_notebooks/plot_video_api.ipynb>`_ + or :ref:`go to the end <sphx_glr_download_auto_examples_others_plot_video_api.py>` to download the full example code. This example illustrates some of the APIs that torchvision offers for videos, together with the examples on how to build datasets and more. diff --git a/gallery/others/plot_visualization_utils.py b/gallery/others/plot_visualization_utils.py index bb3d1c8bcfc..98089c54dbb 100644 --- a/gallery/others/plot_visualization_utils.py +++ b/gallery/others/plot_visualization_utils.py @@ -3,6 +3,10 @@ Visualization utilities ======================= +.. note:: + Try on `Colab <https://colab.research.google.com/github/pytorch/vision/blob/gh-pages/_generated_ipynb_notebooks/plot_visualization_utils.ipynb>`_ + or :ref:`go to the end <sphx_glr_download_auto_examples_others_plot_visualization_utils.py>` to download the full example code. + This example illustrates some of the utilities that torchvision offers for visualizing images, bounding boxes, segmentation masks and keypoints. 
""" diff --git a/gallery/v2_transforms/plot_custom_datapoints.py b/gallery/v2_transforms/plot_custom_datapoints.py index dcad5f0a406..1415bdedc0e 100644 --- a/gallery/v2_transforms/plot_custom_datapoints.py +++ b/gallery/v2_transforms/plot_custom_datapoints.py @@ -3,6 +3,10 @@ How to write your own Datapoint class ===================================== +.. note:: + Try on `collab `_ + or :ref:`go to the end ` to download the full example code. + This guide is intended for advanced users and downstream library maintainers. We explain how to write your own datapoint class, and how to make it compatible with the built-in Torchvision v2 transforms. Before continuing, make sure you have read diff --git a/gallery/v2_transforms/plot_custom_transforms.py b/gallery/v2_transforms/plot_custom_transforms.py index eba8e91faf4..2afaba735f6 100644 --- a/gallery/v2_transforms/plot_custom_transforms.py +++ b/gallery/v2_transforms/plot_custom_transforms.py @@ -3,6 +3,10 @@ How to write your own v2 transforms =================================== +.. note:: + Try on `collab `_ + or :ref:`go to the end ` to download the full example code. + This guide explains how to write transforms that are compatible with the torchvision transforms V2 API. """ diff --git a/gallery/v2_transforms/plot_cutmix_mixup.py b/gallery/v2_transforms/plot_cutmix_mixup.py index 932ce325b56..4b94f618bbb 100644 --- a/gallery/v2_transforms/plot_cutmix_mixup.py +++ b/gallery/v2_transforms/plot_cutmix_mixup.py @@ -4,6 +4,10 @@ How to use CutMix and MixUp =========================== +.. note:: + Try on `collab `_ + or :ref:`go to the end ` to download the full example code. + :class:`~torchvision.transforms.v2.CutMix` and :class:`~torchvision.transforms.v2.MixUp` are popular augmentation strategies that can improve classification accuracy. 
diff --git a/gallery/v2_transforms/plot_datapoints.py b/gallery/v2_transforms/plot_datapoints.py index 0bab2d34088..8da508e2dce 100644 --- a/gallery/v2_transforms/plot_datapoints.py +++ b/gallery/v2_transforms/plot_datapoints.py @@ -3,7 +3,10 @@ Datapoints FAQ ============== -https://colab.research.google.com/github/pytorch/vision/blob/gh-pages/_generated_ipynb_notebooks/plot_datapoints.ipynb +.. note:: + Try on `Colab <https://colab.research.google.com/github/pytorch/vision/blob/gh-pages/_generated_ipynb_notebooks/plot_datapoints.ipynb>`_ + or :ref:`go to the end <sphx_glr_download_auto_examples_v2_transforms_plot_datapoints.py>` to download the full example code. + Datapoints are Tensor subclasses introduced together with ``torchvision.transforms.v2``. This example showcases what these datapoints are diff --git a/gallery/v2_transforms/plot_transforms_v2.py b/gallery/v2_transforms/plot_transforms_v2.py index e6c8b3ffdb5..0dd788e8c39 100644 --- a/gallery/v2_transforms/plot_transforms_v2.py +++ b/gallery/v2_transforms/plot_transforms_v2.py @@ -3,6 +3,10 @@ Getting started with transforms v2 ================================== +.. note:: + Try on `Colab <https://colab.research.google.com/github/pytorch/vision/blob/gh-pages/_generated_ipynb_notebooks/plot_transforms_v2.ipynb>`_ + or :ref:`go to the end <sphx_glr_download_auto_examples_v2_transforms_plot_transforms_v2.py>` to download the full example code. + Most computer vision tasks are not supported out of the box by ``torchvision.transforms`` v1, since it only supports images. ``torchvision.transforms.v2`` enables jointly transforming images, videos, bounding boxes, and masks. This example showcases the core functionality of the new ``torchvision.transforms.v2`` API. diff --git a/gallery/v2_transforms/plot_transforms_v2_e2e.py b/gallery/v2_transforms/plot_transforms_v2_e2e.py index e6a36ebbf58..b023284f79f 100644 --- a/gallery/v2_transforms/plot_transforms_v2_e2e.py +++ b/gallery/v2_transforms/plot_transforms_v2_e2e.py @@ -3,6 +3,10 @@ Transforms v2: End-to-end object detection example ================================================== +.. note:: + Try on `Colab <https://colab.research.google.com/github/pytorch/vision/blob/gh-pages/_generated_ipynb_notebooks/plot_transforms_v2_e2e.ipynb>`_ + or :ref:`go to the end <sphx_glr_download_auto_examples_v2_transforms_plot_transforms_v2_e2e.py>` to download the full example code. + Object detection is not supported out of the box by ``torchvision.transforms`` v1, since it only supports images. 
``torchvision.transforms.v2`` enables jointly transforming images, videos, bounding boxes, and masks. This example showcases an end-to-end object detection training using the stable ``torchvision.datasets`` and ``torchvision.models``