From 46f886c8c0264446971d787c10cf722e8bed3d20 Mon Sep 17 00:00:00 2001
From: Vasilis Vryniotis
Date: Thu, 2 Sep 2021 17:13:00 +0100
Subject: [PATCH] Fix doc

---
 gallery/plot_transforms.py            |  2 +-
 torchvision/transforms/autoaugment.py | 22 +++++++++++-----------
 2 files changed, 12 insertions(+), 12 deletions(-)

diff --git a/gallery/plot_transforms.py b/gallery/plot_transforms.py
index 68ffae16a0f..fe5864ebad5 100644
--- a/gallery/plot_transforms.py
+++ b/gallery/plot_transforms.py
@@ -255,7 +255,7 @@ def plot(imgs, with_orig=True, row_title=None, **imshow_kwargs):
 
 ####################################
 # TrivialAugmentWide
-# ~~~~~~~~~~~
+# ~~~~~~~~~~~~~~~~~~
 # The :class:`~torchvision.transforms.TrivialAugmentWide` transform automatically augments the data.
 augmenter = T.TrivialAugmentWide()
 imgs = [augmenter(orig_img) for _ in range(4)]
diff --git a/torchvision/transforms/autoaugment.py b/torchvision/transforms/autoaugment.py
index 117030d3a50..4f82bc6acd5 100644
--- a/torchvision/transforms/autoaugment.py
+++ b/torchvision/transforms/autoaugment.py
@@ -330,17 +330,17 @@ def __repr__(self) -> str:
 class TrivialAugmentWide(torch.nn.Module):
     r"""Dataset-independent data-augmentation with TrivialAugment Wide, as described in
     `"TrivialAugment: Tuning-free Yet State-of-the-Art Data Augmentation" <https://arxiv.org/abs/2103.10158>`.
-        If the image is torch Tensor, it should be of type torch.uint8, and it is expected
-        to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions.
-        If img is PIL Image, it is expected to be in mode "L" or "RGB".
-
-        Args:
-            num_magnitude_bins (int): The number of different magnitude values.
-            interpolation (InterpolationMode): Desired interpolation enum defined by
-                :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``.
-                If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.
-            fill (sequence or number, optional): Pixel fill value for the area outside the transformed
-                image. If given a number, the value is used for all bands respectively.
+    If the image is torch Tensor, it should be of type torch.uint8, and it is expected
+    to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions.
+    If img is PIL Image, it is expected to be in mode "L" or "RGB".
+
+    Args:
+        num_magnitude_bins (int): The number of different magnitude values.
+        interpolation (InterpolationMode): Desired interpolation enum defined by
+            :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``.
+            If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.
+        fill (sequence or number, optional): Pixel fill value for the area outside the transformed
+            image. If given a number, the value is used for all bands respectively.
     """
 
     def __init__(self, num_magnitude_bins: int = 30, interpolation: InterpolationMode = InterpolationMode.NEAREST,
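
Note (not part of the patch): a minimal usage sketch of the transform whose docstring is reformatted above, assuming a torchvision build that already ships TrivialAugmentWide; the input file name is hypothetical.

from PIL import Image
import torchvision.transforms as T

# Load an RGB PIL image; per the docstring, TrivialAugmentWide also accepts
# uint8 tensors shaped [..., 1 or 3, H, W].
orig_img = Image.open("example.jpg").convert("RGB")  # hypothetical input file

# interpolation and fill correspond to the documented constructor arguments;
# they are passed explicitly here only for illustration.
augmenter = T.TrivialAugmentWide(
    interpolation=T.InterpolationMode.NEAREST,
    fill=0,
)

# Each call samples a random operation and magnitude, so repeated calls on
# the same image yield different augmented results.
imgs = [augmenter(orig_img) for _ in range(4)]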