diff --git a/gallery/plot_video_api.py b/gallery/plot_video_api.py index 65e03c64bc9..fe296d67be0 100644 --- a/gallery/plot_video_api.py +++ b/gallery/plot_video_api.py @@ -35,7 +35,7 @@ # Download the sample video download_url( - "https://github.com/pytorch/vision/blob/master/test/assets/videos/WUzgd7C1pWA.mp4?raw=true", + "https://github.com/pytorch/vision/blob/main/test/assets/videos/WUzgd7C1pWA.mp4?raw=true", ".", "WUzgd7C1pWA.mp4" ) @@ -187,26 +187,26 @@ def example_read_video(video_object, start=0, end=None, read_video=True, read_au # Download the videos from torchvision.datasets.utils import download_url download_url( - "https://github.com/pytorch/vision/blob/master/test/assets/videos/WUzgd7C1pWA.mp4?raw=true", + "https://github.com/pytorch/vision/blob/main/test/assets/videos/WUzgd7C1pWA.mp4?raw=true", "./dataset/1", "WUzgd7C1pWA.mp4" ) download_url( - "https://github.com/pytorch/vision/blob/master/test/assets/videos/RATRACE_wave_f_nm_np1_fr_goo_37.avi?raw=true", + "https://github.com/pytorch/vision/blob/main/test/assets/videos/RATRACE_wave_f_nm_np1_fr_goo_37.avi?raw=true", "./dataset/1", "RATRACE_wave_f_nm_np1_fr_goo_37.avi" ) download_url( - "https://github.com/pytorch/vision/blob/master/test/assets/videos/SOX5yA1l24A.mp4?raw=true", + "https://github.com/pytorch/vision/blob/main/test/assets/videos/SOX5yA1l24A.mp4?raw=true", "./dataset/2", "SOX5yA1l24A.mp4" ) download_url( - "https://github.com/pytorch/vision/blob/master/test/assets/videos/v_SoccerJuggling_g23_c01.avi?raw=true", + "https://github.com/pytorch/vision/blob/main/test/assets/videos/v_SoccerJuggling_g23_c01.avi?raw=true", "./dataset/2", "v_SoccerJuggling_g23_c01.avi" ) download_url( - "https://github.com/pytorch/vision/blob/master/test/assets/videos/v_SoccerJuggling_g24_c01.avi?raw=true", + "https://github.com/pytorch/vision/blob/main/test/assets/videos/v_SoccerJuggling_g24_c01.avi?raw=true", "./dataset/2", "v_SoccerJuggling_g24_c01.avi" ) diff 
--git a/references/video_classification/README.md b/references/video_classification/README.md index ef7db6dcd90..b2dd9d04f62 100644 --- a/references/video_classification/README.md +++ b/references/video_classification/README.md @@ -22,7 +22,7 @@ python -m torch.distributed.launch --nproc_per_node=8 --use_env train.py --data- ``` **Note:** all our models were trained on 8 nodes with 8 V100 GPUs each for a total of 64 GPUs. Expected training time for 64 GPUs is 24 hours, depending on the storage solution. -**Note 2:** hyperparameters for exact replication of our training can be found [here](https://github.com/pytorch/vision/blob/master/torchvision/models/video/README.md). Some hyperparameters such as learning rate are scaled linearly in proportion to the number of GPUs. +**Note 2:** hyperparameters for exact replication of our training can be found [here](https://github.com/pytorch/vision/blob/main/torchvision/models/video/README.md). Some hyperparameters such as learning rate are scaled linearly in proportion to the number of GPUs. 
### Single GPU diff --git a/torchvision/models/video/README.md b/torchvision/models/video/README.md index 23dd673d053..1024534f546 100644 --- a/torchvision/models/video/README.md +++ b/torchvision/models/video/README.md @@ -8,7 +8,7 @@ Additional documentation can be found [here](https://pytorch.org/docs/stable/tor ### Kinetics400 dataset pretraining parameters -See reference training script [here](https://github.com/pytorch/vision/blob/master/references/video_classification/train.py): +See reference training script [here](https://github.com/pytorch/vision/blob/main/references/video_classification/train.py): - input size: [3, 16, 112, 112] - input space: RGB diff --git a/torchvision/transforms/transforms.py b/torchvision/transforms/transforms.py index a5582b20a45..4b3c08dbce7 100644 --- a/torchvision/transforms/transforms.py +++ b/torchvision/transforms/transforms.py @@ -83,7 +83,7 @@ class ToTensor: Because the input image is scaled to [0.0, 1.0], this transformation should not be used when transforming target image masks. See the `references`_ for implementing the transforms for image masks. - .. _references: https://github.com/pytorch/vision/tree/master/references/segmentation + .. _references: https://github.com/pytorch/vision/tree/main/references/segmentation """ def __call__(self, pic):