From 58b8ebac7f7161aa58ecdee10d91cde64189b5b9 Mon Sep 17 00:00:00 2001
From: Philip Meier
Date: Tue, 11 Jul 2023 13:30:02 +0200
Subject: [PATCH] make datapoint methods private

---
 test/test_transforms_v2.py                    | 10 ++--
 test/test_transforms_v2_functional.py         |  2 +-
 test/test_transforms_v2_refactored.py         |  2 +-
 torchvision/datapoints/_bounding_box.py       | 22 ++++----
 torchvision/datapoints/_datapoint.py          | 53 ++++++++++---------
 torchvision/datapoints/_image.py              | 50 ++++++++---------
 torchvision/datapoints/_mask.py               | 22 ++++----
 torchvision/datapoints/_video.py              | 50 ++++++++---------
 .../transforms/v2/functional/_color.py        | 24 ++++-----
 .../transforms/v2/functional/_geometry.py     | 22 ++++----
 torchvision/transforms/v2/functional/_misc.py |  4 +-
 11 files changed, 131 insertions(+), 130 deletions(-)

diff --git a/test/test_transforms_v2.py b/test/test_transforms_v2.py
index 3743581794f..d033858c5eb 100644
--- a/test/test_transforms_v2.py
+++ b/test/test_transforms_v2.py
@@ -1417,12 +1417,12 @@ def test_antialias_warning():
         transforms.RandomResize(10, 20)(tensor_img)
 
     with pytest.warns(UserWarning, match=match):
-        datapoints.Image(tensor_img).resized_crop(0, 0, 10, 10, (20, 20))
+        datapoints.Image(tensor_img)._resized_crop(0, 0, 10, 10, (20, 20))
 
     with pytest.warns(UserWarning, match=match):
-        datapoints.Video(tensor_video).resize((20, 20))
+        datapoints.Video(tensor_video)._resize((20, 20))
 
     with pytest.warns(UserWarning, match=match):
-        datapoints.Video(tensor_video).resized_crop(0, 0, 10, 10, (20, 20))
+        datapoints.Video(tensor_video)._resized_crop(0, 0, 10, 10, (20, 20))
 
     with warnings.catch_warnings():
         warnings.simplefilter("error")
@@ -1436,8 +1436,8 @@ def test_antialias_warning():
         transforms.RandomShortestSize((20, 20), antialias=True)(tensor_img)
         transforms.RandomResize(10, 20, antialias=True)(tensor_img)
 
-        datapoints.Image(tensor_img).resized_crop(0, 0, 10, 10, (20, 20), antialias=True)
-        datapoints.Video(tensor_video).resized_crop(0, 0, 10, 10, (20, 20), antialias=True)
+        datapoints.Image(tensor_img)._resized_crop(0, 0, 10, 10, (20, 20), antialias=True)
+        datapoints.Video(tensor_video)._resized_crop(0, 0, 10, 10, (20, 20), antialias=True)
 
 
 @pytest.mark.parametrize("image_type", (PIL.Image, torch.Tensor, datapoints.Image))
diff --git a/test/test_transforms_v2_functional.py b/test/test_transforms_v2_functional.py
index 465cc227107..1933ed9b12c 100644
--- a/test/test_transforms_v2_functional.py
+++ b/test/test_transforms_v2_functional.py
@@ -424,7 +424,7 @@ def test_pil_output_type(self, info, args_kwargs):
     def test_dispatch_datapoint(self, info, args_kwargs, spy_on):
         (datapoint, *other_args), kwargs = args_kwargs.load()
 
-        method_name = info.id
+        method_name = f"_{info.id}"
         method = getattr(datapoint, method_name)
         datapoint_type = type(datapoint)
         spy = spy_on(method, module=datapoint_type.__module__, name=f"{datapoint_type.__name__}.{method_name}")
diff --git a/test/test_transforms_v2_refactored.py b/test/test_transforms_v2_refactored.py
index 69180b99dbc..78d32f87f71 100644
--- a/test/test_transforms_v2_refactored.py
+++ b/test/test_transforms_v2_refactored.py
@@ -252,7 +252,7 @@ def _check_dispatcher_datapoint_signature_match(dispatcher):
     dispatcher_signature = inspect.signature(dispatcher)
     dispatcher_params = list(dispatcher_signature.parameters.values())[1:]
 
-    datapoint_method = getattr(datapoints._datapoint.Datapoint, dispatcher.__name__)
+    datapoint_method = getattr(datapoints._datapoint.Datapoint, f"_{dispatcher.__name__}")
     datapoint_signature = inspect.signature(datapoint_method)
     datapoint_params = list(datapoint_signature.parameters.values())[1:]
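
Both test changes encode the same naming convention: the private datapoint hook for a public dispatcher `foo` is named `_foo`. A minimal sketch of that lookup, assuming the beta `torchvision.datapoints` API as it exists at this commit:

    import torch
    from torchvision import datapoints

    img = datapoints.Image(torch.rand(3, 32, 32))
    dispatcher_name = "resize"                  # public functional name
    hook = getattr(img, f"_{dispatcher_name}")  # private hook, per this patch
    out = hook([16, 16], antialias=True)
    assert isinstance(out, datapoints.Image) and out.shape[-2:] == (16, 16)
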
diff --git a/torchvision/datapoints/_bounding_box.py b/torchvision/datapoints/_bounding_box.py
index 11d42f171e4..5a3b898a7df 100644
--- a/torchvision/datapoints/_bounding_box.py
+++ b/torchvision/datapoints/_bounding_box.py
@@ -98,19 +98,19 @@ def wrap_like(
     def __repr__(self, *, tensor_contents: Any = None) -> str:  # type: ignore[override]
         return self._make_repr(format=self.format, spatial_size=self.spatial_size)
 
-    def horizontal_flip(self) -> BoundingBox:
+    def _horizontal_flip(self) -> BoundingBox:
         output = self._F.horizontal_flip_bounding_box(
             self.as_subclass(torch.Tensor), format=self.format, spatial_size=self.spatial_size
         )
         return BoundingBox.wrap_like(self, output)
 
-    def vertical_flip(self) -> BoundingBox:
+    def _vertical_flip(self) -> BoundingBox:
         output = self._F.vertical_flip_bounding_box(
             self.as_subclass(torch.Tensor), format=self.format, spatial_size=self.spatial_size
         )
         return BoundingBox.wrap_like(self, output)
 
-    def resize(  # type: ignore[override]
+    def _resize(
         self,
         size: List[int],
         interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
@@ -125,19 +125,19 @@ def resize(  # type: ignore[override]
         )
         return BoundingBox.wrap_like(self, output, spatial_size=spatial_size)
 
-    def crop(self, top: int, left: int, height: int, width: int) -> BoundingBox:
+    def _crop(self, top: int, left: int, height: int, width: int) -> BoundingBox:
         output, spatial_size = self._F.crop_bounding_box(
             self.as_subclass(torch.Tensor), self.format, top=top, left=left, height=height, width=width
         )
         return BoundingBox.wrap_like(self, output, spatial_size=spatial_size)
 
-    def center_crop(self, output_size: List[int]) -> BoundingBox:
+    def _center_crop(self, output_size: List[int]) -> BoundingBox:
         output, spatial_size = self._F.center_crop_bounding_box(
             self.as_subclass(torch.Tensor), format=self.format, spatial_size=self.spatial_size, output_size=output_size
         )
         return BoundingBox.wrap_like(self, output, spatial_size=spatial_size)
 
-    def resized_crop(
+    def _resized_crop(
         self,
         top: int,
         left: int,
@@ -152,7 +152,7 @@ def resized_crop(
         )
         return BoundingBox.wrap_like(self, output, spatial_size=spatial_size)
 
-    def pad(
+    def _pad(
         self,
         padding: Union[int, Sequence[int]],
         fill: Optional[Union[int, float, List[float]]] = None,
@@ -167,7 +167,7 @@ def pad(
         )
         return BoundingBox.wrap_like(self, output, spatial_size=spatial_size)
 
-    def rotate(
+    def _rotate(
         self,
         angle: float,
         interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
@@ -185,7 +185,7 @@ def rotate(
         )
         return BoundingBox.wrap_like(self, output, spatial_size=spatial_size)
 
-    def affine(
+    def _affine(
         self,
         angle: Union[int, float],
         translate: List[float],
@@ -207,7 +207,7 @@ def affine(
         )
         return BoundingBox.wrap_like(self, output)
 
-    def perspective(
+    def _perspective(
         self,
         startpoints: Optional[List[List[int]]],
         endpoints: Optional[List[List[int]]],
@@ -225,7 +225,7 @@ def perspective(
         )
         return BoundingBox.wrap_like(self, output)
 
-    def elastic(
+    def _elastic(
         self,
         displacement: torch.Tensor,
         interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
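
Unlike images, bounding boxes carry a `spatial_size` that geometry kernels can change, which is why the methods above unpack `(output, spatial_size)` pairs and pass the new size back through `wrap_like`. A small illustration, assuming the beta API at this commit:

    import torch
    from torchvision import datapoints

    boxes = datapoints.BoundingBox(
        torch.tensor([[10.0, 10.0, 20.0, 20.0]]),
        format=datapoints.BoundingBoxFormat.XYXY,
        spatial_size=(32, 32),
    )
    resized = boxes._resize([64, 64])        # private hook, per this patch
    assert resized.spatial_size == (64, 64)  # metadata updated via wrap_like
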
diff --git a/torchvision/datapoints/_datapoint.py b/torchvision/datapoints/_datapoint.py
index 0dabec58f25..41fa941826e 100644
--- a/torchvision/datapoints/_datapoint.py
+++ b/torchvision/datapoints/_datapoint.py
@@ -142,15 +142,13 @@ def __deepcopy__(self: D, memo: Dict[int, Any]) -> D:
         # `BoundingBox.clone()`.
         return self.detach().clone().requires_grad_(self.requires_grad)  # type: ignore[return-value]
 
-    def horizontal_flip(self) -> Datapoint:
+    def _horizontal_flip(self) -> Datapoint:
         return self
 
-    def vertical_flip(self) -> Datapoint:
+    def _vertical_flip(self) -> Datapoint:
         return self
 
-    # TODO: We have to ignore override mypy error as there is torch.Tensor built-in deprecated op: Tensor.resize
-    # https://github.com/pytorch/pytorch/blob/e8727994eb7cdb2ab642749d6549bc497563aa06/torch/_tensor.py#L588-L593
-    def resize(  # type: ignore[override]
+    def _resize(
         self,
         size: List[int],
         interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
@@ -159,13 +157,13 @@ def resize(  # type: ignore[override]
     ) -> Datapoint:
         return self
 
-    def crop(self, top: int, left: int, height: int, width: int) -> Datapoint:
+    def _crop(self, top: int, left: int, height: int, width: int) -> Datapoint:
         return self
 
-    def center_crop(self, output_size: List[int]) -> Datapoint:
+    def _center_crop(self, output_size: List[int]) -> Datapoint:
         return self
 
-    def resized_crop(
+    def _resized_crop(
         self,
         top: int,
         left: int,
@@ -177,7 +175,7 @@ def resized_crop(
     ) -> Datapoint:
         return self
 
-    def pad(
+    def _pad(
         self,
         padding: List[int],
         fill: Optional[Union[int, float, List[float]]] = None,
@@ -185,7 +183,7 @@ def pad(
     ) -> Datapoint:
         return self
 
-    def rotate(
+    def _rotate(
         self,
         angle: float,
         interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
@@ -195,7 +193,7 @@ def rotate(
     ) -> Datapoint:
         return self
 
-    def affine(
+    def _affine(
         self,
         angle: Union[int, float],
         translate: List[float],
@@ -207,7 +205,7 @@ def affine(
     ) -> Datapoint:
         return self
 
-    def perspective(
+    def _perspective(
         self,
         startpoints: Optional[List[List[int]]],
         endpoints: Optional[List[List[int]]],
@@ -217,7 +215,7 @@ def perspective(
     ) -> Datapoint:
         return self
 
-    def elastic(
+    def _elastic(
         self,
         displacement: torch.Tensor,
         interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
@@ -225,43 +223,46 @@ def elastic(
     ) -> Datapoint:
         return self
 
-    def rgb_to_grayscale(self, num_output_channels: int = 1) -> Datapoint:
+    def _rgb_to_grayscale(self, num_output_channels: int = 1) -> Datapoint:
         return self
 
-    def adjust_brightness(self, brightness_factor: float) -> Datapoint:
+    def _adjust_brightness(self, brightness_factor: float) -> Datapoint:
         return self
 
-    def adjust_saturation(self, saturation_factor: float) -> Datapoint:
+    def _adjust_saturation(self, saturation_factor: float) -> Datapoint:
         return self
 
-    def adjust_contrast(self, contrast_factor: float) -> Datapoint:
+    def _adjust_contrast(self, contrast_factor: float) -> Datapoint:
         return self
 
-    def adjust_sharpness(self, sharpness_factor: float) -> Datapoint:
+    def _adjust_sharpness(self, sharpness_factor: float) -> Datapoint:
         return self
 
-    def adjust_hue(self, hue_factor: float) -> Datapoint:
+    def _adjust_hue(self, hue_factor: float) -> Datapoint:
         return self
 
-    def adjust_gamma(self, gamma: float, gain: float = 1) -> Datapoint:
+    def _adjust_gamma(self, gamma: float, gain: float = 1) -> Datapoint:
         return self
 
-    def posterize(self, bits: int) -> Datapoint:
+    def _posterize(self, bits: int) -> Datapoint:
         return self
 
-    def solarize(self, threshold: float) -> Datapoint:
+    def _solarize(self, threshold: float) -> Datapoint:
         return self
 
-    def autocontrast(self) -> Datapoint:
+    def _autocontrast(self) -> Datapoint:
         return self
 
-    def equalize(self) -> Datapoint:
+    def _equalize(self) -> Datapoint:
         return self
 
-    def invert(self) -> Datapoint:
+    def _invert(self) -> Datapoint:
         return self
 
-    def gaussian_blur(self, kernel_size: List[int], sigma: Optional[List[float]] = None) -> Datapoint:
+    def _gaussian_blur(self, kernel_size: List[int], sigma: Optional[List[float]] = None) -> Datapoint:
+        return self
+
+    def _normalize(self, mean: List[float], std: List[float], inplace: bool = False) -> Datapoint:
         return self
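
The base-class bodies are deliberate no-ops: a dispatcher can call any hook on any datapoint, and a type with nothing to do simply returns itself. That is what lets a label-like datapoint flow through a geometric transform untouched. A hypothetical subclass (`Label` is not a real torchvision type) showing the inherited behavior:

    import torch
    from torchvision.datapoints._datapoint import Datapoint

    class Label(Datapoint):  # hypothetical, for illustration only
        pass

    label = torch.tensor([1, 2, 3]).as_subclass(Label)
    assert label._horizontal_flip() is label  # inherited no-op
    assert label._adjust_hue(0.1) is label    # likewise
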
diff --git a/torchvision/datapoints/_image.py b/torchvision/datapoints/_image.py
index e47a6c10fc3..267c28d945d 100644
--- a/torchvision/datapoints/_image.py
+++ b/torchvision/datapoints/_image.py
@@ -64,15 +64,15 @@ def spatial_size(self) -> Tuple[int, int]:
     def num_channels(self) -> int:
         return self.shape[-3]
 
-    def horizontal_flip(self) -> Image:
+    def _horizontal_flip(self) -> Image:
        output = self._F.horizontal_flip_image_tensor(self.as_subclass(torch.Tensor))
         return Image.wrap_like(self, output)
 
-    def vertical_flip(self) -> Image:
+    def _vertical_flip(self) -> Image:
         output = self._F.vertical_flip_image_tensor(self.as_subclass(torch.Tensor))
         return Image.wrap_like(self, output)
 
-    def resize(  # type: ignore[override]
+    def _resize(
         self,
         size: List[int],
         interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
@@ -84,15 +84,15 @@ def resize(  # type: ignore[override]
         )
         return Image.wrap_like(self, output)
 
-    def crop(self, top: int, left: int, height: int, width: int) -> Image:
+    def _crop(self, top: int, left: int, height: int, width: int) -> Image:
         output = self._F.crop_image_tensor(self.as_subclass(torch.Tensor), top, left, height, width)
         return Image.wrap_like(self, output)
 
-    def center_crop(self, output_size: List[int]) -> Image:
+    def _center_crop(self, output_size: List[int]) -> Image:
         output = self._F.center_crop_image_tensor(self.as_subclass(torch.Tensor), output_size=output_size)
         return Image.wrap_like(self, output)
 
-    def resized_crop(
+    def _resized_crop(
         self,
         top: int,
         left: int,
@@ -114,7 +114,7 @@ def resized_crop(
         )
         return Image.wrap_like(self, output)
 
-    def pad(
+    def _pad(
         self,
         padding: List[int],
         fill: Optional[Union[int, float, List[float]]] = None,
@@ -123,7 +123,7 @@ def pad(
         output = self._F.pad_image_tensor(self.as_subclass(torch.Tensor), padding, fill=fill, padding_mode=padding_mode)
         return Image.wrap_like(self, output)
 
-    def rotate(
+    def _rotate(
         self,
         angle: float,
         interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
@@ -136,7 +136,7 @@ def rotate(
         )
         return Image.wrap_like(self, output)
 
-    def affine(
+    def _affine(
         self,
         angle: Union[int, float],
         translate: List[float],
@@ -158,7 +158,7 @@ def affine(
         )
         return Image.wrap_like(self, output)
 
-    def perspective(
+    def _perspective(
         self,
         startpoints: Optional[List[List[int]]],
         endpoints: Optional[List[List[int]]],
@@ -176,7 +176,7 @@ def perspective(
         )
         return Image.wrap_like(self, output)
 
-    def elastic(
+    def _elastic(
         self,
         displacement: torch.Tensor,
         interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
@@ -187,69 +187,69 @@ def elastic(
         )
         return Image.wrap_like(self, output)
 
-    def rgb_to_grayscale(self, num_output_channels: int = 1) -> Image:
+    def _rgb_to_grayscale(self, num_output_channels: int = 1) -> Image:
         output = self._F.rgb_to_grayscale_image_tensor(
             self.as_subclass(torch.Tensor), num_output_channels=num_output_channels
         )
         return Image.wrap_like(self, output)
 
-    def adjust_brightness(self, brightness_factor: float) -> Image:
+    def _adjust_brightness(self, brightness_factor: float) -> Image:
         output = self._F.adjust_brightness_image_tensor(
             self.as_subclass(torch.Tensor), brightness_factor=brightness_factor
         )
         return Image.wrap_like(self, output)
 
-    def adjust_saturation(self, saturation_factor: float) -> Image:
+    def _adjust_saturation(self, saturation_factor: float) -> Image:
         output = self._F.adjust_saturation_image_tensor(
             self.as_subclass(torch.Tensor), saturation_factor=saturation_factor
         )
         return Image.wrap_like(self, output)
 
-    def adjust_contrast(self, contrast_factor: float) -> Image:
+    def _adjust_contrast(self, contrast_factor: float) -> Image:
         output = self._F.adjust_contrast_image_tensor(self.as_subclass(torch.Tensor), contrast_factor=contrast_factor)
         return Image.wrap_like(self, output)
 
-    def adjust_sharpness(self, sharpness_factor: float) -> Image:
+    def _adjust_sharpness(self, sharpness_factor: float) -> Image:
         output = self._F.adjust_sharpness_image_tensor(
             self.as_subclass(torch.Tensor), sharpness_factor=sharpness_factor
         )
         return Image.wrap_like(self, output)
 
-    def adjust_hue(self, hue_factor: float) -> Image:
+    def _adjust_hue(self, hue_factor: float) -> Image:
         output = self._F.adjust_hue_image_tensor(self.as_subclass(torch.Tensor), hue_factor=hue_factor)
         return Image.wrap_like(self, output)
 
-    def adjust_gamma(self, gamma: float, gain: float = 1) -> Image:
+    def _adjust_gamma(self, gamma: float, gain: float = 1) -> Image:
         output = self._F.adjust_gamma_image_tensor(self.as_subclass(torch.Tensor), gamma=gamma, gain=gain)
         return Image.wrap_like(self, output)
 
-    def posterize(self, bits: int) -> Image:
+    def _posterize(self, bits: int) -> Image:
         output = self._F.posterize_image_tensor(self.as_subclass(torch.Tensor), bits=bits)
         return Image.wrap_like(self, output)
 
-    def solarize(self, threshold: float) -> Image:
+    def _solarize(self, threshold: float) -> Image:
         output = self._F.solarize_image_tensor(self.as_subclass(torch.Tensor), threshold=threshold)
         return Image.wrap_like(self, output)
 
-    def autocontrast(self) -> Image:
+    def _autocontrast(self) -> Image:
         output = self._F.autocontrast_image_tensor(self.as_subclass(torch.Tensor))
         return Image.wrap_like(self, output)
 
-    def equalize(self) -> Image:
+    def _equalize(self) -> Image:
         output = self._F.equalize_image_tensor(self.as_subclass(torch.Tensor))
         return Image.wrap_like(self, output)
 
-    def invert(self) -> Image:
+    def _invert(self) -> Image:
         output = self._F.invert_image_tensor(self.as_subclass(torch.Tensor))
         return Image.wrap_like(self, output)
 
-    def gaussian_blur(self, kernel_size: List[int], sigma: Optional[List[float]] = None) -> Image:
+    def _gaussian_blur(self, kernel_size: List[int], sigma: Optional[List[float]] = None) -> Image:
         output = self._F.gaussian_blur_image_tensor(
             self.as_subclass(torch.Tensor), kernel_size=kernel_size, sigma=sigma
         )
         return Image.wrap_like(self, output)
 
-    def normalize(self, mean: List[float], std: List[float], inplace: bool = False) -> Image:
+    def _normalize(self, mean: List[float], std: List[float], inplace: bool = False) -> Image:
         output = self._F.normalize_image_tensor(self.as_subclass(torch.Tensor), mean=mean, std=std, inplace=inplace)
         return Image.wrap_like(self, output)
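
Each `Image` method is a thin wrapper: unwrap to a plain tensor, call the `*_image_tensor` kernel, re-wrap with `wrap_like`. The public entry point is unchanged; the functional dispatcher now simply calls the underscored hook. A sketch of that equivalence, assuming this commit's API:

    import torch
    from torchvision import datapoints
    from torchvision.transforms.v2 import functional as F

    img = datapoints.Image(torch.rand(3, 8, 8))
    public = F.adjust_brightness(img, brightness_factor=1.5)  # supported path
    private = img._adjust_brightness(brightness_factor=1.5)   # internal hook
    assert torch.equal(public, private)
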
diff --git a/torchvision/datapoints/_mask.py b/torchvision/datapoints/_mask.py
index 0135d793d32..c6f1d209cc3 100644
--- a/torchvision/datapoints/_mask.py
+++ b/torchvision/datapoints/_mask.py
@@ -55,15 +55,15 @@ def wrap_like(
     def spatial_size(self) -> Tuple[int, int]:
         return tuple(self.shape[-2:])  # type: ignore[return-value]
 
-    def horizontal_flip(self) -> Mask:
+    def _horizontal_flip(self) -> Mask:
         output = self._F.horizontal_flip_mask(self.as_subclass(torch.Tensor))
         return Mask.wrap_like(self, output)
 
-    def vertical_flip(self) -> Mask:
+    def _vertical_flip(self) -> Mask:
         output = self._F.vertical_flip_mask(self.as_subclass(torch.Tensor))
         return Mask.wrap_like(self, output)
 
-    def resize(  # type: ignore[override]
+    def _resize(
         self,
         size: List[int],
         interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
@@ -73,15 +73,15 @@ def resize(  # type: ignore[override]
         output = self._F.resize_mask(self.as_subclass(torch.Tensor), size, max_size=max_size)
         return Mask.wrap_like(self, output)
 
-    def crop(self, top: int, left: int, height: int, width: int) -> Mask:
+    def _crop(self, top: int, left: int, height: int, width: int) -> Mask:
         output = self._F.crop_mask(self.as_subclass(torch.Tensor), top, left, height, width)
         return Mask.wrap_like(self, output)
 
-    def center_crop(self, output_size: List[int]) -> Mask:
+    def _center_crop(self, output_size: List[int]) -> Mask:
         output = self._F.center_crop_mask(self.as_subclass(torch.Tensor), output_size=output_size)
         return Mask.wrap_like(self, output)
 
-    def resized_crop(
+    def _resized_crop(
         self,
         top: int,
         left: int,
@@ -94,7 +94,7 @@ def resized_crop(
         output = self._F.resized_crop_mask(self.as_subclass(torch.Tensor), top, left, height, width, size=size)
         return Mask.wrap_like(self, output)
 
-    def pad(
+    def _pad(
         self,
         padding: List[int],
         fill: Optional[Union[int, float, List[float]]] = None,
@@ -103,7 +103,7 @@ def pad(
         output = self._F.pad_mask(self.as_subclass(torch.Tensor), padding, padding_mode=padding_mode, fill=fill)
         return Mask.wrap_like(self, output)
 
-    def rotate(
+    def _rotate(
         self,
         angle: float,
         interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
@@ -114,7 +114,7 @@ def rotate(
         output = self._F.rotate_mask(self.as_subclass(torch.Tensor), angle, expand=expand, center=center, fill=fill)
         return Mask.wrap_like(self, output)
 
-    def affine(
+    def _affine(
         self,
         angle: Union[int, float],
         translate: List[float],
@@ -135,7 +135,7 @@ def affine(
         )
         return Mask.wrap_like(self, output)
 
-    def perspective(
+    def _perspective(
         self,
         startpoints: Optional[List[List[int]]],
         endpoints: Optional[List[List[int]]],
@@ -148,7 +148,7 @@ def perspective(
         )
         return Mask.wrap_like(self, output)
 
-    def elastic(
+    def _elastic(
         self,
         displacement: torch.Tensor,
         interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
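
Note that `Mask._resize` defaults to `InterpolationMode.NEAREST` (versus `BILINEAR` on `Image`/`Video`) and never antialiases: masks hold categorical labels, and interpolation must not blend them into new values. A quick check of that property, assuming this commit's API:

    import torch
    from torchvision import datapoints

    mask = datapoints.Mask(torch.randint(0, 5, (16, 16)))
    resized = mask._resize([32, 32])  # nearest-neighbour by default
    assert set(resized.unique().tolist()) <= set(mask.unique().tolist())
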
diff --git a/torchvision/datapoints/_video.py b/torchvision/datapoints/_video.py
index a6fbe2bd473..ae7f19f0a69 100644
--- a/torchvision/datapoints/_video.py
+++ b/torchvision/datapoints/_video.py
@@ -58,15 +58,15 @@ def num_channels(self) -> int:
     def num_frames(self) -> int:
         return self.shape[-4]
 
-    def horizontal_flip(self) -> Video:
+    def _horizontal_flip(self) -> Video:
         output = self._F.horizontal_flip_video(self.as_subclass(torch.Tensor))
         return Video.wrap_like(self, output)
 
-    def vertical_flip(self) -> Video:
+    def _vertical_flip(self) -> Video:
         output = self._F.vertical_flip_video(self.as_subclass(torch.Tensor))
         return Video.wrap_like(self, output)
 
-    def resize(  # type: ignore[override]
+    def _resize(
         self,
         size: List[int],
         interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
@@ -82,15 +82,15 @@ def resize(  # type: ignore[override]
         )
         return Video.wrap_like(self, output)
 
-    def crop(self, top: int, left: int, height: int, width: int) -> Video:
+    def _crop(self, top: int, left: int, height: int, width: int) -> Video:
         output = self._F.crop_video(self.as_subclass(torch.Tensor), top, left, height, width)
         return Video.wrap_like(self, output)
 
-    def center_crop(self, output_size: List[int]) -> Video:
+    def _center_crop(self, output_size: List[int]) -> Video:
         output = self._F.center_crop_video(self.as_subclass(torch.Tensor), output_size=output_size)
         return Video.wrap_like(self, output)
 
-    def resized_crop(
+    def _resized_crop(
         self,
         top: int,
         left: int,
@@ -112,7 +112,7 @@ def resized_crop(
         )
         return Video.wrap_like(self, output)
 
-    def pad(
+    def _pad(
         self,
         padding: List[int],
         fill: Optional[Union[int, float, List[float]]] = None,
@@ -121,7 +121,7 @@ def pad(
         output = self._F.pad_video(self.as_subclass(torch.Tensor), padding, fill=fill, padding_mode=padding_mode)
         return Video.wrap_like(self, output)
 
-    def rotate(
+    def _rotate(
         self,
         angle: float,
         interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
@@ -134,7 +134,7 @@ def rotate(
         )
         return Video.wrap_like(self, output)
 
-    def affine(
+    def _affine(
         self,
         angle: Union[int, float],
         translate: List[float],
@@ -156,7 +156,7 @@ def affine(
         )
         return Video.wrap_like(self, output)
 
-    def perspective(
+    def _perspective(
         self,
         startpoints: Optional[List[List[int]]],
         endpoints: Optional[List[List[int]]],
@@ -174,7 +174,7 @@ def perspective(
         )
         return Video.wrap_like(self, output)
 
-    def elastic(
+    def _elastic(
         self,
         displacement: torch.Tensor,
         interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
@@ -185,61 +185,61 @@ def elastic(
         )
         return Video.wrap_like(self, output)
 
-    def rgb_to_grayscale(self, num_output_channels: int = 1) -> Video:
+    def _rgb_to_grayscale(self, num_output_channels: int = 1) -> Video:
         output = self._F.rgb_to_grayscale_image_tensor(
             self.as_subclass(torch.Tensor), num_output_channels=num_output_channels
         )
         return Video.wrap_like(self, output)
 
-    def adjust_brightness(self, brightness_factor: float) -> Video:
+    def _adjust_brightness(self, brightness_factor: float) -> Video:
         output = self._F.adjust_brightness_video(self.as_subclass(torch.Tensor), brightness_factor=brightness_factor)
         return Video.wrap_like(self, output)
 
-    def adjust_saturation(self, saturation_factor: float) -> Video:
+    def _adjust_saturation(self, saturation_factor: float) -> Video:
         output = self._F.adjust_saturation_video(self.as_subclass(torch.Tensor), saturation_factor=saturation_factor)
         return Video.wrap_like(self, output)
 
-    def adjust_contrast(self, contrast_factor: float) -> Video:
+    def _adjust_contrast(self, contrast_factor: float) -> Video:
         output = self._F.adjust_contrast_video(self.as_subclass(torch.Tensor), contrast_factor=contrast_factor)
         return Video.wrap_like(self, output)
 
-    def adjust_sharpness(self, sharpness_factor: float) -> Video:
+    def _adjust_sharpness(self, sharpness_factor: float) -> Video:
         output = self._F.adjust_sharpness_video(self.as_subclass(torch.Tensor), sharpness_factor=sharpness_factor)
         return Video.wrap_like(self, output)
 
-    def adjust_hue(self, hue_factor: float) -> Video:
+    def _adjust_hue(self, hue_factor: float) -> Video:
         output = self._F.adjust_hue_video(self.as_subclass(torch.Tensor), hue_factor=hue_factor)
         return Video.wrap_like(self, output)
 
-    def adjust_gamma(self, gamma: float, gain: float = 1) -> Video:
+    def _adjust_gamma(self, gamma: float, gain: float = 1) -> Video:
         output = self._F.adjust_gamma_video(self.as_subclass(torch.Tensor), gamma=gamma, gain=gain)
         return Video.wrap_like(self, output)
 
-    def posterize(self, bits: int) -> Video:
+    def _posterize(self, bits: int) -> Video:
         output = self._F.posterize_video(self.as_subclass(torch.Tensor), bits=bits)
         return Video.wrap_like(self, output)
 
-    def solarize(self, threshold: float) -> Video:
+    def _solarize(self, threshold: float) -> Video:
         output = self._F.solarize_video(self.as_subclass(torch.Tensor), threshold=threshold)
         return Video.wrap_like(self, output)
 
-    def autocontrast(self) -> Video:
+    def _autocontrast(self) -> Video:
         output = self._F.autocontrast_video(self.as_subclass(torch.Tensor))
         return Video.wrap_like(self, output)
 
-    def equalize(self) -> Video:
+    def _equalize(self) -> Video:
         output = self._F.equalize_video(self.as_subclass(torch.Tensor))
         return Video.wrap_like(self, output)
 
-    def invert(self) -> Video:
+    def _invert(self) -> Video:
         output = self._F.invert_video(self.as_subclass(torch.Tensor))
         return Video.wrap_like(self, output)
 
-    def gaussian_blur(self, kernel_size: List[int], sigma: Optional[List[float]] = None) -> Video:
+    def _gaussian_blur(self, kernel_size: List[int], sigma: Optional[List[float]] = None) -> Video:
         output = self._F.gaussian_blur_video(self.as_subclass(torch.Tensor), kernel_size=kernel_size, sigma=sigma)
         return Video.wrap_like(self, output)
 
-    def normalize(self, mean: List[float], std: List[float], inplace: bool = False) -> Video:
+    def _normalize(self, mean: List[float], std: List[float], inplace: bool = False) -> Video:
         output = self._F.normalize_video(self.as_subclass(torch.Tensor), mean=mean, std=std, inplace=inplace)
         return Video.wrap_like(self, output)
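
`Video` mirrors `Image` over a `(..., T, C, H, W)` layout, mostly through dedicated `*_video` kernels; `rgb_to_grayscale` reuses the image kernel, which broadcasts over the frame dimension. Since every hook re-wraps with `wrap_like`, the private methods chain naturally. A sketch, assuming this commit's API:

    import torch
    from torchvision import datapoints

    video = datapoints.Video(torch.rand(4, 3, 8, 8))  # T, C, H, W
    out = video._horizontal_flip()._normalize(mean=[0.5] * 3, std=[0.5] * 3)
    assert isinstance(out, datapoints.Video) and out.shape == video.shape
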
diff --git a/torchvision/transforms/v2/functional/_color.py b/torchvision/transforms/v2/functional/_color.py
index 13417e4a990..a2acd051c64 100644
--- a/torchvision/transforms/v2/functional/_color.py
+++ b/torchvision/transforms/v2/functional/_color.py
@@ -46,7 +46,7 @@ def rgb_to_grayscale(
     if torch.jit.is_scripting() or is_simple_tensor(inpt):
         return rgb_to_grayscale_image_tensor(inpt, num_output_channels=num_output_channels)
     elif isinstance(inpt, datapoints._datapoint.Datapoint):
-        return inpt.rgb_to_grayscale(num_output_channels=num_output_channels)
+        return inpt._rgb_to_grayscale(num_output_channels=num_output_channels)
     elif isinstance(inpt, PIL.Image.Image):
         return rgb_to_grayscale_image_pil(inpt, num_output_channels=num_output_channels)
     else:
@@ -97,7 +97,7 @@ def adjust_brightness(inpt: datapoints._InputTypeJIT, brightness_factor: float)
     if torch.jit.is_scripting() or is_simple_tensor(inpt):
         return adjust_brightness_image_tensor(inpt, brightness_factor=brightness_factor)
     elif isinstance(inpt, datapoints._datapoint.Datapoint):
-        return inpt.adjust_brightness(brightness_factor=brightness_factor)
+        return inpt._adjust_brightness(brightness_factor=brightness_factor)
     elif isinstance(inpt, PIL.Image.Image):
         return adjust_brightness_image_pil(inpt, brightness_factor=brightness_factor)
     else:
@@ -141,7 +141,7 @@ def adjust_saturation(inpt: datapoints._InputTypeJIT, saturation_factor: float)
     ):
         return adjust_saturation_image_tensor(inpt, saturation_factor=saturation_factor)
     elif isinstance(inpt, datapoints._datapoint.Datapoint):
-        return inpt.adjust_saturation(saturation_factor=saturation_factor)
+        return inpt._adjust_saturation(saturation_factor=saturation_factor)
     elif isinstance(inpt, PIL.Image.Image):
         return adjust_saturation_image_pil(inpt, saturation_factor=saturation_factor)
     else:
@@ -183,7 +183,7 @@ def adjust_contrast(inpt: datapoints._InputTypeJIT, contrast_factor: float) -> d
     if torch.jit.is_scripting() or is_simple_tensor(inpt):
         return adjust_contrast_image_tensor(inpt, contrast_factor=contrast_factor)
     elif isinstance(inpt, datapoints._datapoint.Datapoint):
-        return inpt.adjust_contrast(contrast_factor=contrast_factor)
+        return inpt._adjust_contrast(contrast_factor=contrast_factor)
     elif isinstance(inpt, PIL.Image.Image):
         return adjust_contrast_image_pil(inpt, contrast_factor=contrast_factor)
     else:
@@ -261,7 +261,7 @@ def adjust_sharpness(inpt: datapoints._InputTypeJIT, sharpness_factor: float) ->
     ):
         return adjust_sharpness_image_tensor(inpt, sharpness_factor=sharpness_factor)
     elif isinstance(inpt, datapoints._datapoint.Datapoint):
-        return inpt.adjust_sharpness(sharpness_factor=sharpness_factor)
+        return inpt._adjust_sharpness(sharpness_factor=sharpness_factor)
     elif isinstance(inpt, PIL.Image.Image):
         return adjust_sharpness_image_pil(inpt, sharpness_factor=sharpness_factor)
     else:
@@ -376,7 +376,7 @@ def adjust_hue(inpt: datapoints._InputTypeJIT, hue_factor: float) -> datapoints.
     if torch.jit.is_scripting() or is_simple_tensor(inpt):
         return adjust_hue_image_tensor(inpt, hue_factor=hue_factor)
     elif isinstance(inpt, datapoints._datapoint.Datapoint):
-        return inpt.adjust_hue(hue_factor=hue_factor)
+        return inpt._adjust_hue(hue_factor=hue_factor)
     elif isinstance(inpt, PIL.Image.Image):
         return adjust_hue_image_pil(inpt, hue_factor=hue_factor)
     else:
@@ -419,7 +419,7 @@ def adjust_gamma(inpt: datapoints._InputTypeJIT, gamma: float, gain: float = 1)
     if torch.jit.is_scripting() or is_simple_tensor(inpt):
         return adjust_gamma_image_tensor(inpt, gamma=gamma, gain=gain)
     elif isinstance(inpt, datapoints._datapoint.Datapoint):
-        return inpt.adjust_gamma(gamma=gamma, gain=gain)
+        return inpt._adjust_gamma(gamma=gamma, gain=gain)
     elif isinstance(inpt, PIL.Image.Image):
         return adjust_gamma_image_pil(inpt, gamma=gamma, gain=gain)
     else:
@@ -456,7 +456,7 @@ def posterize(inpt: datapoints._InputTypeJIT, bits: int) -> datapoints._InputTyp
     if torch.jit.is_scripting() or is_simple_tensor(inpt):
         return posterize_image_tensor(inpt, bits=bits)
     elif isinstance(inpt, datapoints._datapoint.Datapoint):
-        return inpt.posterize(bits=bits)
+        return inpt._posterize(bits=bits)
     elif isinstance(inpt, PIL.Image.Image):
         return posterize_image_pil(inpt, bits=bits)
     else:
@@ -487,7 +487,7 @@ def solarize(inpt: datapoints._InputTypeJIT, threshold: float) -> datapoints._In
     if torch.jit.is_scripting() or is_simple_tensor(inpt):
         return solarize_image_tensor(inpt, threshold=threshold)
     elif isinstance(inpt, datapoints._datapoint.Datapoint):
-        return inpt.solarize(threshold=threshold)
+        return inpt._solarize(threshold=threshold)
     elif isinstance(inpt, PIL.Image.Image):
         return solarize_image_pil(inpt, threshold=threshold)
     else:
@@ -540,7 +540,7 @@ def autocontrast(inpt: datapoints._InputTypeJIT) -> datapoints._InputTypeJIT:
     if torch.jit.is_scripting() or is_simple_tensor(inpt):
         return autocontrast_image_tensor(inpt)
     elif isinstance(inpt, datapoints._datapoint.Datapoint):
-        return inpt.autocontrast()
+        return inpt._autocontrast()
     elif isinstance(inpt, PIL.Image.Image):
         return autocontrast_image_pil(inpt)
     else:
@@ -633,7 +633,7 @@ def equalize(inpt: datapoints._InputTypeJIT) -> datapoints._InputTypeJIT:
     if torch.jit.is_scripting() or is_simple_tensor(inpt):
         return equalize_image_tensor(inpt)
     elif isinstance(inpt, datapoints._datapoint.Datapoint):
-        return inpt.equalize()
+        return inpt._equalize()
     elif isinstance(inpt, PIL.Image.Image):
         return equalize_image_pil(inpt)
     else:
@@ -667,7 +667,7 @@ def invert(inpt: datapoints._InputTypeJIT) -> datapoints._InputTypeJIT:
     if torch.jit.is_scripting() or is_simple_tensor(inpt):
         return invert_image_tensor(inpt)
     elif isinstance(inpt, datapoints._datapoint.Datapoint):
-        return inpt.invert()
+        return inpt._invert()
     elif isinstance(inpt, PIL.Image.Image):
         return invert_image_pil(inpt)
     else:
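
Every dispatcher in this file follows the same four-way branch; the only change in this patch is the underscore on the datapoint call. The shared shape as a self-contained sketch (`is_simple_tensor` is internal to torchvision, so a stand-in is defined here, and the tensor kernel is a toy stand-in too):

    import torch
    import PIL.Image
    import PIL.ImageOps
    from torchvision import datapoints

    def is_simple_tensor(t):  # stand-in for torchvision's internal helper
        return isinstance(t, torch.Tensor) and not isinstance(t, datapoints._datapoint.Datapoint)

    def invert(inpt):
        if torch.jit.is_scripting() or is_simple_tensor(inpt):
            return 1.0 - inpt                 # toy tensor kernel (float inputs)
        elif isinstance(inpt, datapoints._datapoint.Datapoint):
            return inpt._invert()             # private hook, per this patch
        elif isinstance(inpt, PIL.Image.Image):
            return PIL.ImageOps.invert(inpt)  # PIL path
        else:
            raise TypeError(f"Input type {type(inpt)} is not supported")
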
diff --git a/torchvision/transforms/v2/functional/_geometry.py b/torchvision/transforms/v2/functional/_geometry.py
index e1dd2866bc5..eaa1caf0c61 100644
--- a/torchvision/transforms/v2/functional/_geometry.py
+++ b/torchvision/transforms/v2/functional/_geometry.py
@@ -79,7 +79,7 @@ def horizontal_flip(inpt: datapoints._InputTypeJIT) -> datapoints._InputTypeJIT:
     if torch.jit.is_scripting() or is_simple_tensor(inpt):
         return horizontal_flip_image_tensor(inpt)
     elif isinstance(inpt, datapoints._datapoint.Datapoint):
-        return inpt.horizontal_flip()
+        return inpt._horizontal_flip()
     elif isinstance(inpt, PIL.Image.Image):
         return horizontal_flip_image_pil(inpt)
     else:
@@ -129,7 +129,7 @@ def vertical_flip(inpt: datapoints._InputTypeJIT) -> datapoints._InputTypeJIT:
     if torch.jit.is_scripting() or is_simple_tensor(inpt):
         return vertical_flip_image_tensor(inpt)
     elif isinstance(inpt, datapoints._datapoint.Datapoint):
-        return inpt.vertical_flip()
+        return inpt._vertical_flip()
     elif isinstance(inpt, PIL.Image.Image):
         return vertical_flip_image_pil(inpt)
     else:
@@ -314,7 +314,7 @@ def resize(
     if torch.jit.is_scripting() or is_simple_tensor(inpt):
         return resize_image_tensor(inpt, size, interpolation=interpolation, max_size=max_size, antialias=antialias)
     elif isinstance(inpt, datapoints._datapoint.Datapoint):
-        return inpt.resize(size, interpolation=interpolation, max_size=max_size, antialias=antialias)
+        return inpt._resize(size, interpolation=interpolation, max_size=max_size, antialias=antialias)
     elif isinstance(inpt, PIL.Image.Image):
         if antialias is False:
             warnings.warn("Anti-alias option is always applied for PIL Image input. Argument antialias is ignored.")
@@ -843,7 +843,7 @@ def affine(
             center=center,
         )
     elif isinstance(inpt, datapoints._datapoint.Datapoint):
-        return inpt.affine(
+        return inpt._affine(
             angle, translate=translate, scale=scale, shear=shear, interpolation=interpolation, fill=fill, center=center
         )
     elif isinstance(inpt, PIL.Image.Image):
@@ -1004,7 +1004,7 @@ def rotate(
     if torch.jit.is_scripting() or is_simple_tensor(inpt):
         return rotate_image_tensor(inpt, angle, interpolation=interpolation, expand=expand, fill=fill, center=center)
     elif isinstance(inpt, datapoints._datapoint.Datapoint):
-        return inpt.rotate(angle, interpolation=interpolation, expand=expand, fill=fill, center=center)
+        return inpt._rotate(angle, interpolation=interpolation, expand=expand, fill=fill, center=center)
     elif isinstance(inpt, PIL.Image.Image):
         return rotate_image_pil(inpt, angle, interpolation=interpolation, expand=expand, fill=fill, center=center)
     else:
@@ -1214,7 +1214,7 @@ def pad(
         return pad_image_tensor(inpt, padding, fill=fill, padding_mode=padding_mode)
 
     elif isinstance(inpt, datapoints._datapoint.Datapoint):
-        return inpt.pad(padding, fill=fill, padding_mode=padding_mode)
+        return inpt._pad(padding, fill=fill, padding_mode=padding_mode)
     elif isinstance(inpt, PIL.Image.Image):
         return pad_image_pil(inpt, padding, fill=fill, padding_mode=padding_mode)
     else:
@@ -1292,7 +1292,7 @@ def crop(inpt: datapoints._InputTypeJIT, top: int, left: int, height: int, width
     if torch.jit.is_scripting() or is_simple_tensor(inpt):
         return crop_image_tensor(inpt, top, left, height, width)
     elif isinstance(inpt, datapoints._datapoint.Datapoint):
-        return inpt.crop(top, left, height, width)
+        return inpt._crop(top, left, height, width)
     elif isinstance(inpt, PIL.Image.Image):
         return crop_image_pil(inpt, top, left, height, width)
     else:
@@ -1554,7 +1554,7 @@ def perspective(
             inpt, startpoints, endpoints, interpolation=interpolation, fill=fill, coefficients=coefficients
         )
     elif isinstance(inpt, datapoints._datapoint.Datapoint):
-        return inpt.perspective(
+        return inpt._perspective(
             startpoints, endpoints, interpolation=interpolation, fill=fill, coefficients=coefficients
         )
     elif isinstance(inpt, PIL.Image.Image):
@@ -1742,7 +1742,7 @@ def elastic(
     if torch.jit.is_scripting() or is_simple_tensor(inpt):
         return elastic_image_tensor(inpt, displacement, interpolation=interpolation, fill=fill)
     elif isinstance(inpt, datapoints._datapoint.Datapoint):
-        return inpt.elastic(displacement, interpolation=interpolation, fill=fill)
+        return inpt._elastic(displacement, interpolation=interpolation, fill=fill)
     elif isinstance(inpt, PIL.Image.Image):
         return elastic_image_pil(inpt, displacement, interpolation=interpolation, fill=fill)
     else:
@@ -1855,7 +1855,7 @@ def center_crop(inpt: datapoints._InputTypeJIT, output_size: List[int]) -> datap
     if torch.jit.is_scripting() or is_simple_tensor(inpt):
         return center_crop_image_tensor(inpt, output_size)
     elif isinstance(inpt, datapoints._datapoint.Datapoint):
-        return inpt.center_crop(output_size)
+        return inpt._center_crop(output_size)
     elif isinstance(inpt, PIL.Image.Image):
         return center_crop_image_pil(inpt, output_size)
     else:
@@ -1951,7 +1951,7 @@ def resized_crop(
             inpt, top, left, height, width, antialias=antialias, size=size, interpolation=interpolation
         )
     elif isinstance(inpt, datapoints._datapoint.Datapoint):
-        return inpt.resized_crop(top, left, height, width, antialias=antialias, size=size, interpolation=interpolation)
+        return inpt._resized_crop(top, left, height, width, antialias=antialias, size=size, interpolation=interpolation)
     elif isinstance(inpt, PIL.Image.Image):
         return resized_crop_image_pil(inpt, top, left, height, width, size=size, interpolation=interpolation)
     else:
diff --git a/torchvision/transforms/v2/functional/_misc.py b/torchvision/transforms/v2/functional/_misc.py
index 9abb3ac22ce..92c96dc7827 100644
--- a/torchvision/transforms/v2/functional/_misc.py
+++ b/torchvision/transforms/v2/functional/_misc.py
@@ -63,7 +63,7 @@ def normalize(
     if torch.jit.is_scripting() or is_simple_tensor(inpt):
         return normalize_image_tensor(inpt, mean=mean, std=std, inplace=inplace)
     elif isinstance(inpt, (datapoints.Image, datapoints.Video)):
-        return inpt.normalize(mean=mean, std=std, inplace=inplace)
+        return inpt._normalize(mean=mean, std=std, inplace=inplace)
     else:
         raise TypeError(
             f"Input can either be a plain tensor or an `Image` or `Video` datapoint, " f"but got {type(inpt)} instead."
@@ -174,7 +174,7 @@ def gaussian_blur(
     if torch.jit.is_scripting() or is_simple_tensor(inpt):
         return gaussian_blur_image_tensor(inpt, kernel_size=kernel_size, sigma=sigma)
     elif isinstance(inpt, datapoints._datapoint.Datapoint):
-        return inpt.gaussian_blur(kernel_size=kernel_size, sigma=sigma)
+        return inpt._gaussian_blur(kernel_size=kernel_size, sigma=sigma)
     elif isinstance(inpt, PIL.Image.Image):
         return gaussian_blur_image_pil(inpt, kernel_size=kernel_size, sigma=sigma)
     else:
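
`normalize` is the one dispatcher restricted to `Image` and `Video`: normalizing boxes or masks would be meaningless, so other datapoints raise instead of silently passing through. A sketch, assuming this commit's API:

    import torch
    from torchvision import datapoints
    from torchvision.transforms.v2 import functional as F

    video = datapoints.Video(torch.rand(2, 3, 4, 4))
    F.normalize(video, mean=[0.5] * 3, std=[0.5] * 3)  # fine

    mask = datapoints.Mask(torch.zeros(4, 4, dtype=torch.int64))
    try:
        F.normalize(mask, mean=[0.0], std=[1.0])
    except TypeError as err:
        print(err)  # only plain tensors and Image/Video are accepted
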