diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs
index eec93854788..b9754e29b1c 100644
--- a/.git-blame-ignore-revs
+++ b/.git-blame-ignore-revs
@@ -7,3 +7,5 @@
 d367a01a18a3ae6bee13d8be3b63fd6a581ea46f
 # Upgrade usort to 1.0.2 and black to 22.3.0 (#5106)
 6ca9c76adb6daf2695d603ad623a9cf1c4f4806f
+# Fix unnecessary exploded black formatting (#7709)
+a335d916db0694770e8152f41e19195de3134523
diff --git a/test/test_functional_tensor.py b/test/test_functional_tensor.py
index 43f54e6f107..fb3f5744e54 100644
--- a/test/test_functional_tensor.py
+++ b/test/test_functional_tensor.py
@@ -293,24 +293,8 @@ def test_translations(self, device, height, width, dt, t, fn):
             (33, (5, -4), 1.0, [0.0, 0.0], [0, 0, 0]),
             (45, [-5, 4], 1.2, [0.0, 0.0], (1, 2, 3)),
             (33, (-4, -8), 2.0, [0.0, 0.0], [255, 255, 255]),
-            (
-                85,
-                (10, -10),
-                0.7,
-                [0.0, 0.0],
-                [
-                    1,
-                ],
-            ),
-            (
-                0,
-                [0, 0],
-                1.0,
-                [
-                    35.0,
-                ],
-                (2.0,),
-            ),
+            (85, (10, -10), 0.7, [0.0, 0.0], [1]),
+            (0, [0, 0], 1.0, [35.0], (2.0,)),
             (-25, [0, 0], 1.2, [0.0, 15.0], None),
             (-45, [-10, 0], 0.7, [2.0, 5.0], None),
             (-45, [-10, -10], 1.2, [4.0, 5.0], None),
@@ -392,19 +376,7 @@ def _get_data_dims_and_points_for_perspective():
 
 @pytest.mark.parametrize("device", cpu_and_cuda())
 @pytest.mark.parametrize("dims_and_points", _get_data_dims_and_points_for_perspective())
 @pytest.mark.parametrize("dt", [None, torch.float32, torch.float64, torch.float16])
-@pytest.mark.parametrize(
-    "fill",
-    (
-        None,
-        [0, 0, 0],
-        [1, 2, 3],
-        [255, 255, 255],
-        [
-            1,
-        ],
-        (2.0,),
-    ),
-)
+@pytest.mark.parametrize("fill", (None, [0, 0, 0], [1, 2, 3], [255, 255, 255], [1], (2.0,)))
 @pytest.mark.parametrize("fn", [F.perspective, torch.jit.script(F.perspective)])
 def test_perspective_pil_vs_tensor(device, dims_and_points, dt, fill, fn):
@@ -475,19 +447,7 @@ def test_perspective_interpolation_type():
 
 @pytest.mark.parametrize("device", cpu_and_cuda())
 @pytest.mark.parametrize("dt", [None, torch.float32, torch.float64, torch.float16])
-@pytest.mark.parametrize(
-    "size",
-    [
-        32,
-        26,
-        [
-            32,
-        ],
-        [32, 32],
-        (32, 32),
-        [26, 35],
-    ],
-)
+@pytest.mark.parametrize("size", [32, 26, [32], [32, 32], (32, 32), [26, 35]])
 @pytest.mark.parametrize("max_size", [None, 34, 40, 1000])
 @pytest.mark.parametrize("interpolation", [BILINEAR, BICUBIC, NEAREST, NEAREST_EXACT])
 def test_resize(device, dt, size, max_size, interpolation):
diff --git a/test/test_transforms_v2_functional.py b/test/test_transforms_v2_functional.py
index 9a2ea37a4ae..93996432aa5 100644
--- a/test/test_transforms_v2_functional.py
+++ b/test/test_transforms_v2_functional.py
@@ -539,6 +539,7 @@ def test_bounding_box_format_consistency(self, info, args_kwargs):
             (F.to_pil_image, F.to_image_pil),
             (F.elastic_transform, F.elastic),
             (F.convert_image_dtype, F.convert_dtype_image_tensor),
+            (F.to_grayscale, F.rgb_to_grayscale),
         ]
     ],
 )
diff --git a/torchvision/transforms/functional.py b/torchvision/transforms/functional.py
index 2c2f1e19359..3e81005c6d6 100644
--- a/torchvision/transforms/functional.py
+++ b/torchvision/transforms/functional.py
@@ -1248,7 +1248,7 @@ def affine(
 
 # Looks like to_grayscale() is a stand-alone functional that is never called
 # from the transform classes. Perhaps it's still here for BC? I can't be
-# bothered to dig. Anyway, this can be deprecated as we migrate to V2.
+# bothered to dig.
 @torch.jit.unused
 def to_grayscale(img, num_output_channels=1):
     """Convert PIL image of any mode (RGB, HSV, LAB, etc) to grayscale version of image.
diff --git a/torchvision/transforms/v2/functional/__init__.py b/torchvision/transforms/v2/functional/__init__.py
index ffb34c87748..b4803f4f1b9 100644
--- a/torchvision/transforms/v2/functional/__init__.py
+++ b/torchvision/transforms/v2/functional/__init__.py
@@ -76,6 +76,7 @@
     solarize_image_pil,
     solarize_image_tensor,
     solarize_video,
+    to_grayscale,
 )
 from ._geometry import (
     affine,
@@ -168,4 +169,4 @@
 from ._temporal import uniform_temporal_subsample, uniform_temporal_subsample_video
 from ._type_conversion import pil_to_tensor, to_image_pil, to_image_tensor, to_pil_image
 
-from ._deprecated import get_image_size, to_grayscale, to_tensor  # usort: skip
+from ._deprecated import get_image_size, to_tensor  # usort: skip
diff --git a/torchvision/transforms/v2/functional/_color.py b/torchvision/transforms/v2/functional/_color.py
index 4ba7e5b36b3..13417e4a990 100644
--- a/torchvision/transforms/v2/functional/_color.py
+++ b/torchvision/transforms/v2/functional/_color.py
@@ -56,6 +56,11 @@ def rgb_to_grayscale(
     )
 
 
+# `to_grayscale` actually predates `rgb_to_grayscale` in v1, but only handles PIL images. Since `rgb_to_grayscale` is a
+# superset in terms of functionality and has the same signature, we alias here to avoid disruption.
+to_grayscale = rgb_to_grayscale
+
+
 def _blend(image1: torch.Tensor, image2: torch.Tensor, ratio: float) -> torch.Tensor:
     ratio = float(ratio)
     fp = image1.is_floating_point()
diff --git a/torchvision/transforms/v2/functional/_deprecated.py b/torchvision/transforms/v2/functional/_deprecated.py
index c88e3eb81c1..c9a0f647e60 100644
--- a/torchvision/transforms/v2/functional/_deprecated.py
+++ b/torchvision/transforms/v2/functional/_deprecated.py
@@ -1,22 +1,12 @@
 import warnings
 from typing import Any, List, Union
 
-import PIL.Image
 import torch
 
 from torchvision import datapoints
 from torchvision.transforms import functional as _F
 
 
-@torch.jit.unused
-def to_grayscale(inpt: PIL.Image.Image, num_output_channels: int = 1) -> PIL.Image.Image:
-    warnings.warn(
-        "The function `to_grayscale` is deprecated in will be removed in a future release. "
-        "Instead, please use `rgb_to_grayscale`.",
-    )
-    return _F.to_grayscale(inpt, num_output_channels=num_output_channels)
-
-
 @torch.jit.unused
 def to_tensor(inpt: Any) -> torch.Tensor:
     warnings.warn(
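
Note on usage: as the `_color.py` comment above explains, `to_grayscale` in the v2 functional namespace is now a plain alias of `rgb_to_grayscale` rather than a deprecated PIL-only wrapper, so calling it no longer emits the warning that the removed `_deprecated.py` shim raised. A minimal sketch of the resulting behavior, not part of the patch itself (the inputs below are illustrative, and it assumes a torchvision build containing this change):

import PIL.Image
import torch
from torchvision.transforms.v2 import functional as F

# The two names are now the same callable, not a wrapper around one another.
assert F.to_grayscale is F.rgb_to_grayscale

# Tensor path: previously to_grayscale only handled PIL images.
img_tensor = torch.randint(0, 256, (3, 4, 4), dtype=torch.uint8)
gray_tensor = F.to_grayscale(img_tensor, num_output_channels=1)
print(gray_tensor.shape)  # torch.Size([1, 4, 4])

# PIL path: unchanged behavior for existing callers.
img_pil = PIL.Image.new("RGB", (4, 4))
gray_pil = F.to_grayscale(img_pil, num_output_channels=1)
print(gray_pil.mode)  # "L"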