diff --git a/docs/source/beta_status.py b/docs/source/beta_status.py index cc79ca8972f..8871f6debbb 100644 --- a/docs/source/beta_status.py +++ b/docs/source/beta_status.py @@ -12,18 +12,8 @@ def run(self): return [self.node("", nodes.paragraph("", "", nodes.Text(text)))] -class V2BetaStatus(BetaStatus): - text = ( - "The {api_name} is in Beta stage, and while we do not expect disruptive breaking changes, " - "some APIs may slightly change according to user feedback. Please submit any feedback you may have " - "in this issue: https://github.com/pytorch/vision/issues/6753." - ) - node = nodes.note - - def setup(app): app.add_directive("betastatus", BetaStatus) - app.add_directive("v2betastatus", V2BetaStatus) return { "version": "0.1", "parallel_read_safe": True, diff --git a/docs/source/transforms.rst b/docs/source/transforms.rst index 2aa1fc5ba1e..19260f5f02a 100644 --- a/docs/source/transforms.rst +++ b/docs/source/transforms.rst @@ -126,13 +126,6 @@ you're already using transforms from ``torchvision.transforms``, all you need to do is to update the import to ``torchvision.transforms.v2``. In terms of output, there might be negligible differences due to implementation differences. -.. note:: - - The v2 transforms are still BETA, but at this point we do not expect - disruptive changes to be made to their public APIs. We're planning to make - them fully stable in version 0.17. Please submit any feedback you may have - `here `_. - .. _transforms_perf: Performance considerations diff --git a/torchvision/transforms/v2/_augment.py b/torchvision/transforms/v2/_augment.py index ad7fb861be2..caddcac811c 100644 --- a/torchvision/transforms/v2/_augment.py +++ b/torchvision/transforms/v2/_augment.py @@ -15,9 +15,7 @@ class RandomErasing(_RandomApplyTransform): - """[BETA] Randomly select a rectangle region in the input image or video and erase its pixels. - - .. v2betastatus:: RandomErasing transform + """Randomly select a rectangle region in the input image or video and erase its pixels. This transform does not support PIL Image. 'Random Erasing Data Augmentation' by Zhong et al. See https://arxiv.org/abs/1708.04896 @@ -207,9 +205,7 @@ def _mixup_label(self, label: torch.Tensor, *, lam: float) -> torch.Tensor: class MixUp(_BaseMixUpCutMix): - """[BETA] Apply MixUp to the provided batch of images and labels. - - .. v2betastatus:: MixUp transform + """Apply MixUp to the provided batch of images and labels. Paper: `mixup: Beyond Empirical Risk Minimization `_. @@ -256,9 +252,7 @@ def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any: class CutMix(_BaseMixUpCutMix): - """[BETA] Apply CutMix to the provided batch of images and labels. - - .. v2betastatus:: CutMix transform + """Apply CutMix to the provided batch of images and labels. Paper: `CutMix: Regularization Strategy to Train Strong Classifiers with Localizable Features `_. diff --git a/torchvision/transforms/v2/_auto_augment.py b/torchvision/transforms/v2/_auto_augment.py index 8ddd5aacdc3..193a1b280c6 100644 --- a/torchvision/transforms/v2/_auto_augment.py +++ b/torchvision/transforms/v2/_auto_augment.py @@ -174,11 +174,9 @@ def _apply_image_or_video_transform( class AutoAugment(_AutoAugmentBase): - r"""[BETA] AutoAugment data augmentation method based on + r"""AutoAugment data augmentation method based on `"AutoAugment: Learning Augmentation Strategies from Data" `_. - .. v2betastatus:: AutoAugment transform - This transformation works on images and videos only.
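The transforms.rst paragraph above boils the v1-to-v2 migration down to an import swap. A minimal sketch of what that looks like in user code (the input shape and transform choices are illustrative assumptions, not part of this change):

import torch
from torchvision.transforms import v2 as transforms  # was: from torchvision import transforms

img = torch.randint(0, 256, (3, 224, 224), dtype=torch.uint8)  # AutoAugment expects uint8 tensors
augment = transforms.Compose([
    transforms.AutoAugment(),
    transforms.RandomHorizontalFlip(p=0.5),
])
out = augment(img)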
If the input is :class:`torch.Tensor`, it should be of type ``torch.uint8``, and it is expected @@ -350,12 +348,10 @@ def forward(self, *inputs: Any) -> Any: class RandAugment(_AutoAugmentBase): - r"""[BETA] RandAugment data augmentation method based on + r"""RandAugment data augmentation method based on `"RandAugment: Practical automated data augmentation with a reduced search space" `_. - .. v2betastatus:: RandAugment transform - This transformation works on images and videos only. If the input is :class:`torch.Tensor`, it should be of type ``torch.uint8``, and it is expected @@ -434,11 +430,9 @@ def forward(self, *inputs: Any) -> Any: class TrivialAugmentWide(_AutoAugmentBase): - r"""[BETA] Dataset-independent data-augmentation with TrivialAugment Wide, as described in + r"""Dataset-independent data-augmentation with TrivialAugment Wide, as described in `"TrivialAugment: Tuning-free Yet State-of-the-Art Data Augmentation" `_. - .. v2betastatus:: TrivialAugmentWide transform - This transformation works on images and videos only. If the input is :class:`torch.Tensor`, it should be of type ``torch.uint8``, and it is expected @@ -505,11 +499,9 @@ def forward(self, *inputs: Any) -> Any: class AugMix(_AutoAugmentBase): - r"""[BETA] AugMix data augmentation method based on + r"""AugMix data augmentation method based on `"AugMix: A Simple Data Processing Method to Improve Robustness and Uncertainty" `_. - .. v2betastatus:: AugMix transform - This transformation works on images and videos only. If the input is :class:`torch.Tensor`, it should be of type ``torch.uint8``, and it is expected diff --git a/torchvision/transforms/v2/_color.py b/torchvision/transforms/v2/_color.py index 2715eefa21c..36ba6095802 100644 --- a/torchvision/transforms/v2/_color.py +++ b/torchvision/transforms/v2/_color.py @@ -10,9 +10,7 @@ class Grayscale(Transform): - """[BETA] Convert images or videos to grayscale. - - .. v2betastatus:: Grayscale transform + """Convert images or videos to grayscale. If the input is a :class:`torch.Tensor`, it is expected to have [..., 3 or 1, H, W] shape, where ... means an arbitrary number of leading dimensions @@ -32,9 +30,7 @@ def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any: class RandomGrayscale(_RandomApplyTransform): - """[BETA] Randomly convert image or videos to grayscale with a probability of p (default 0.1). - - .. v2betastatus:: RandomGrayscale transform + """Randomly convert images or videos to grayscale with a probability of p (default 0.1). If the input is a :class:`torch.Tensor`, it is expected to have [..., 3 or 1, H, W] shape, where ... means an arbitrary number of leading dimensions @@ -59,9 +55,7 @@ def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any: class ColorJitter(Transform): - """[BETA] Randomly change the brightness, contrast, saturation and hue of an image or video. - - .. v2betastatus:: ColorJitter transform + """Randomly change the brightness, contrast, saturation and hue of an image or video. If the input is a :class:`torch.Tensor`, it is expected to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions. @@ -163,10 +157,7 @@ def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any: class RandomChannelPermutation(Transform): - """[BETA] Randomly permute the channels of an image or video - - ..
v2betastatus:: RandomChannelPermutation transform - """ + """Randomly permute the channels of an image or video.""" def _get_params(self, flat_inputs: List[Any]) -> Dict[str, Any]: num_channels, *_ = query_chw(flat_inputs) @@ -177,11 +168,9 @@ def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any: class RandomPhotometricDistort(Transform): - """[BETA] Randomly distorts the image or video as used in `SSD: Single Shot + """Randomly distorts the image or video as used in `SSD: Single Shot MultiBox Detector `_. - .. v2betastatus:: RandomPhotometricDistort transform - This transform relies on :class:`~torchvision.transforms.v2.ColorJitter` under the hood to adjust the contrast, saturation, hue, brightness, and also randomly permutes channels. @@ -249,9 +238,7 @@ def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any: class RandomEqualize(_RandomApplyTransform): - """[BETA] Equalize the histogram of the given image or video with a given probability. - - .. v2betastatus:: RandomEqualize transform + """Equalize the histogram of the given image or video with a given probability. If the input is a :class:`torch.Tensor`, it is expected to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions. @@ -268,9 +255,7 @@ def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any: class RandomInvert(_RandomApplyTransform): - """[BETA] Inverts the colors of the given image or video with a given probability. - - .. v2betastatus:: RandomInvert transform + """Inverts the colors of the given image or video with a given probability. If img is a Tensor, it is expected to be in [..., 1 or 3, H, W] format, where ... means it can have an arbitrary number of leading dimensions. @@ -287,11 +272,9 @@ def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any: class RandomPosterize(_RandomApplyTransform): - """[BETA] Posterize the image or video with a given probability by reducing the + """Posterize the image or video with a given probability by reducing the number of bits for each color channel. - .. v2betastatus:: RandomPosterize transform - If the input is a :class:`torch.Tensor`, it should be of type torch.uint8, and it is expected to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions. If img is PIL Image, it is expected to be in mode "L" or "RGB". @@ -312,11 +295,9 @@ def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any: class RandomSolarize(_RandomApplyTransform): - """[BETA] Solarize the image or video with a given probability by inverting all pixel + """Solarize the image or video with a given probability by inverting all pixel values above a threshold. - .. v2betastatus:: RandomSolarize transform - If img is a Tensor, it is expected to be in [..., 1 or 3, H, W] format, where ... means it can have an arbitrary number of leading dimensions. If img is PIL Image, it is expected to be in mode "L" or "RGB". @@ -342,9 +323,7 @@ def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any: class RandomAutocontrast(_RandomApplyTransform): - """[BETA] Autocontrast the pixels of the given image or video with a given probability. - - .. v2betastatus:: RandomAutocontrast transform + """Autocontrast the pixels of the given image or video with a given probability. If the input is a :class:`torch.Tensor`, it is expected to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions.
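For reference, a quick usage sketch of the color transforms whose docstrings are touched above (the parameter values are arbitrary examples):

import torch
from torchvision.transforms import v2

img = torch.randint(0, 256, (3, 224, 224), dtype=torch.uint8)
jitter = v2.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.1)
out = jitter(img)                         # randomly perturbs all four factors
out = v2.RandomChannelPermutation()(out)  # randomly reorders the channels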
@@ -361,9 +340,7 @@ def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any: class RandomAdjustSharpness(_RandomApplyTransform): - """[BETA] Adjust the sharpness of the image or video with a given probability. - - .. v2betastatus:: RandomAdjustSharpness transform + """Adjust the sharpness of the image or video with a given probability. If the input is a :class:`torch.Tensor`, it is expected to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions. diff --git a/torchvision/transforms/v2/_container.py b/torchvision/transforms/v2/_container.py index d57c2a72009..54de601c696 100644 --- a/torchvision/transforms/v2/_container.py +++ b/torchvision/transforms/v2/_container.py @@ -8,9 +8,7 @@ class Compose(Transform): - """[BETA] Composes several transforms together. - - .. v2betastatus:: Compose transform + """Composes several transforms together. This transform does not support torchscript. Please, see the note below. @@ -62,9 +60,7 @@ def extra_repr(self) -> str: class RandomApply(Transform): - """[BETA] Apply randomly a list of transformations with a given probability. - - .. v2betastatus:: RandomApply transform + """Randomly apply a list of transformations with a given probability. .. note:: In order to script the transformation, please use ``torch.nn.ModuleList`` as input instead of list/tuple of @@ -118,9 +114,7 @@ def extra_repr(self) -> str: class RandomChoice(Transform): - """[BETA] Apply single transformation randomly picked from a list. - - .. v2betastatus:: RandomChoice transform + """Apply a single transformation randomly picked from a list. This transform does not support torchscript. @@ -157,9 +151,7 @@ def forward(self, *inputs: Any) -> Any: class RandomOrder(Transform): - """[BETA] Apply a list of transformations in a random order. - - .. v2betastatus:: RandomOrder transform + """Apply a list of transformations in a random order. This transform does not support torchscript. diff --git a/torchvision/transforms/v2/_deprecated.py b/torchvision/transforms/v2/_deprecated.py index 7ffa7194361..05c36a2f2c3 100644 --- a/torchvision/transforms/v2/_deprecated.py +++ b/torchvision/transforms/v2/_deprecated.py @@ -10,12 +10,10 @@ class ToTensor(Transform): - """[BETA] [DEPRECATED] Use ``v2.Compose([v2.ToImage(), v2.ToDtype(torch.float32, scale=True)])`` instead. + """[DEPRECATED] Use ``v2.Compose([v2.ToImage(), v2.ToDtype(torch.float32, scale=True)])`` instead. Convert a PIL Image or ndarray to tensor and scale the values accordingly. - .. v2betastatus:: ToTensor transform - .. warning:: :class:`v2.ToTensor` is deprecated and will be removed in a future release. Please use instead ``v2.Compose([v2.ToImage(), v2.ToDtype(torch.float32, scale=True)])``. diff --git a/torchvision/transforms/v2/_geometry.py b/torchvision/transforms/v2/_geometry.py index 4d3f3fc7fc5..e4ba3d9824b 100644 --- a/torchvision/transforms/v2/_geometry.py +++ b/torchvision/transforms/v2/_geometry.py @@ -31,9 +31,7 @@ class RandomHorizontalFlip(_RandomApplyTransform): - """[BETA] Horizontally flip the input with a given probability. - - .. v2betastatus:: RandomHorizontalFlip transform + """Horizontally flip the input with a given probability. If the input is a :class:`torch.Tensor` or a ``TVTensor`` (e.g. :class:`~torchvision.tv_tensors.Image`, :class:`~torchvision.tv_tensors.Video`, :class:`~torchvision.tv_tensors.BoundingBoxes` etc.)
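The ToTensor deprecation notice above names its replacement; spelled out as a runnable sketch:

import torch
from torchvision.transforms import v2

# The replacement pipeline named in the deprecation notice:
pipeline = v2.Compose([
    v2.ToImage(),                           # PIL Image / ndarray / tensor -> tv_tensors.Image
    v2.ToDtype(torch.float32, scale=True),  # uint8 in [0, 255] -> float32 in [0.0, 1.0]
])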
@@ -51,9 +49,7 @@ def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any: class RandomVerticalFlip(_RandomApplyTransform): - """[BETA] Vertically flip the input with a given probability. - - .. v2betastatus:: RandomVerticalFlip transform + """Vertically flip the input with a given probability. If the input is a :class:`torch.Tensor` or a ``TVTensor`` (e.g. :class:`~torchvision.tv_tensors.Image`, :class:`~torchvision.tv_tensors.Video`, :class:`~torchvision.tv_tensors.BoundingBoxes` etc.) @@ -71,9 +67,7 @@ def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any: class Resize(Transform): - """[BETA] Resize the input to the given size. - - .. v2betastatus:: Resize transform + """Resize the input to the given size. If the input is a :class:`torch.Tensor` or a ``TVTensor`` (e.g. :class:`~torchvision.tv_tensors.Image`, :class:`~torchvision.tv_tensors.Video`, :class:`~torchvision.tv_tensors.BoundingBoxes` etc.) @@ -159,9 +153,7 @@ def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any: class CenterCrop(Transform): - """[BETA] Crop the input at the center. - - .. v2betastatus:: CenterCrop transform + """Crop the input at the center. If the input is a :class:`torch.Tensor` or a ``TVTensor`` (e.g. :class:`~torchvision.tv_tensors.Image`, :class:`~torchvision.tv_tensors.Video`, :class:`~torchvision.tv_tensors.BoundingBoxes` etc.) @@ -187,9 +179,7 @@ def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any: class RandomResizedCrop(Transform): - """[BETA] Crop a random portion of the input and resize it to a given size. - - .. v2betastatus:: RandomResizedCrop transform + """Crop a random portion of the input and resize it to a given size. If the input is a :class:`torch.Tensor` or a ``TVTensor`` (e.g. :class:`~torchvision.tv_tensors.Image`, :class:`~torchvision.tv_tensors.Video`, :class:`~torchvision.tv_tensors.BoundingBoxes` etc.) @@ -310,9 +300,7 @@ def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any: class FiveCrop(Transform): - """[BETA] Crop the image or video into four corners and the central crop. - - .. v2betastatus:: FiveCrop transform + """Crop the image or video into four corners and the central crop. If the input is a :class:`torch.Tensor` or a :class:`~torchvision.tv_tensors.Image` or a :class:`~torchvision.tv_tensors.Video` it can have arbitrary number of leading batch dimensions. @@ -371,11 +359,9 @@ def _check_inputs(self, flat_inputs: List[Any]) -> None: class TenCrop(Transform): - """[BETA] Crop the image or video into four corners and the central crop plus the flipped version of + """Crop the image or video into four corners and the central crop plus the flipped version of these (horizontal flipping is used by default). - .. v2betastatus:: TenCrop transform - If the input is a :class:`torch.Tensor` or a :class:`~torchvision.tv_tensors.Image` or a :class:`~torchvision.tv_tensors.Video` it can have arbitrary number of leading batch dimensions. For example, the image can have ``[..., C, H, W]`` shape. @@ -418,9 +404,7 @@ def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any: class Pad(Transform): - """[BETA] Pad the input on all sides with the given "pad" value. - - .. v2betastatus:: Pad transform + """Pad the input on all sides with the given "pad" value. If the input is a :class:`torch.Tensor` or a ``TVTensor`` (e.g. :class:`~torchvision.tv_tensors.Image`, :class:`~torchvision.tv_tensors.Video`, :class:`~torchvision.tv_tensors.BoundingBoxes` etc.) 
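FiveCrop is one of the few transforms above with a non-standard output; a sketch showing its return value (sizes are arbitrary):

import torch
from torchvision.transforms import v2

img = torch.randint(0, 256, (3, 256, 256), dtype=torch.uint8)
crops = v2.FiveCrop(size=(224, 224))(img)  # four corner crops plus the center crop
assert isinstance(crops, tuple) and len(crops) == 5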
@@ -492,11 +476,9 @@ def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any: class RandomZoomOut(_RandomApplyTransform): - """[BETA] "Zoom out" transformation from + """ "Zoom out" transformation from `"SSD: Single Shot MultiBox Detector" `_. - .. v2betastatus:: RandomZoomOut transform - This transformation randomly pads images, videos, bounding boxes and masks creating a zoom out effect. Output spatial size is randomly sampled from original size up to a maximum size configured with ``side_range`` parameter: @@ -562,9 +544,7 @@ def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any: class RandomRotation(Transform): - """[BETA] Rotate the input by angle. - - .. v2betastatus:: RandomRotation transform + """Rotate the input by angle. If the input is a :class:`torch.Tensor` or a ``TVTensor`` (e.g. :class:`~torchvision.tv_tensors.Image`, :class:`~torchvision.tv_tensors.Video`, :class:`~torchvision.tv_tensors.BoundingBoxes` etc.) @@ -643,9 +623,7 @@ def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any: class RandomAffine(Transform): - """[BETA] Random affine transformation the input keeping center invariant. - - .. v2betastatus:: RandomAffine transform + """Random affine transformation of the input keeping center invariant. If the input is a :class:`torch.Tensor` or a ``TVTensor`` (e.g. :class:`~torchvision.tv_tensors.Image`, :class:`~torchvision.tv_tensors.Video`, :class:`~torchvision.tv_tensors.BoundingBoxes` etc.) @@ -765,9 +743,7 @@ def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any: class RandomCrop(Transform): - """[BETA] Crop the input at a random location. - - .. v2betastatus:: RandomCrop transform + """Crop the input at a random location. If the input is a :class:`torch.Tensor` or a ``TVTensor`` (e.g. :class:`~torchvision.tv_tensors.Image`, :class:`~torchvision.tv_tensors.Video`, :class:`~torchvision.tv_tensors.BoundingBoxes` etc.) @@ -922,9 +898,7 @@ def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any: class RandomPerspective(_RandomApplyTransform): - """[BETA] Perform a random perspective transformation of the input with a given probability. - - .. v2betastatus:: RandomPerspective transform + """Perform a random perspective transformation of the input with a given probability. If the input is a :class:`torch.Tensor` or a ``TVTensor`` (e.g. :class:`~torchvision.tv_tensors.Image`, :class:`~torchvision.tv_tensors.Video`, :class:`~torchvision.tv_tensors.BoundingBoxes` etc.) @@ -1009,9 +983,7 @@ def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any: class ElasticTransform(Transform): - """[BETA] Transform the input with elastic transformations. - - .. v2betastatus:: RandomPerspective transform + """Transform the input with elastic transformations. If the input is a :class:`torch.Tensor` or a ``TVTensor`` (e.g. :class:`~torchvision.tv_tensors.Image`, :class:`~torchvision.tv_tensors.Video`, :class:`~torchvision.tv_tensors.BoundingBoxes` etc.) @@ -1101,11 +1073,9 @@ def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any: class RandomIoUCrop(Transform): - """[BETA] Random IoU crop transformation from + """Random IoU crop transformation from `"SSD: Single Shot MultiBox Detector" `_. - .. v2betastatus:: RandomIoUCrop transform - This transformation requires an image or video data and ``tv_tensors.BoundingBoxes`` in the input. ..
warning:: @@ -1229,11 +1199,9 @@ def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any: class ScaleJitter(Transform): - """[BETA] Perform Large Scale Jitter on the input according to + """Perform Large Scale Jitter on the input according to `"Simple Copy-Paste is a Strong Data Augmentation Method for Instance Segmentation" `_. - .. v2betastatus:: ScaleJitter transform - If the input is a :class:`torch.Tensor` or a ``TVTensor`` (e.g. :class:`~torchvision.tv_tensors.Image`, :class:`~torchvision.tv_tensors.Video`, :class:`~torchvision.tv_tensors.BoundingBoxes` etc.) it can have arbitrary number of leading batch dimensions. For example, @@ -1298,9 +1266,7 @@ def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any: class RandomShortestSize(Transform): - """[BETA] Randomly resize the input. - - .. v2betastatus:: RandomShortestSize transform + """Randomly resize the input. If the input is a :class:`torch.Tensor` or a ``TVTensor`` (e.g. :class:`~torchvision.tv_tensors.Image`, :class:`~torchvision.tv_tensors.Video`, :class:`~torchvision.tv_tensors.BoundingBoxes` etc.) @@ -1368,9 +1334,7 @@ def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any: class RandomResize(Transform): - """[BETA] Randomly resize the input. - - .. v2betastatus:: RandomResize transform + """Randomly resize the input. This transformation can be used together with ``RandomCrop`` as data augmentations to train models on an image segmentation task. diff --git a/torchvision/transforms/v2/_meta.py b/torchvision/transforms/v2/_meta.py index 9fa31ebef94..badb630082c 100644 --- a/torchvision/transforms/v2/_meta.py +++ b/torchvision/transforms/v2/_meta.py @@ -5,9 +5,7 @@ class ConvertBoundingBoxFormat(Transform): - """[BETA] Convert bounding box coordinates to the given ``format``, eg from "CXCYWH" to "XYXY". - - .. v2betastatus:: ConvertBoundingBoxFormat transform + """Convert bounding box coordinates to the given ``format``, e.g. from "CXCYWH" to "XYXY". Args: format (str or tv_tensors.BoundingBoxFormat): output bounding box format. @@ -28,12 +26,10 @@ def _transform(self, inpt: tv_tensors.BoundingBoxes, params: Dict[str, Any]) -> class ClampBoundingBoxes(Transform): - """[BETA] Clamp bounding boxes to their corresponding image dimensions. + """Clamp bounding boxes to their corresponding image dimensions. The clamping is done according to the bounding boxes' ``canvas_size`` meta-data. - .. v2betastatus:: ClampBoundingBoxes transform - """ _transformed_types = (tv_tensors.BoundingBoxes,) diff --git a/torchvision/transforms/v2/_misc.py b/torchvision/transforms/v2/_misc.py index 67aaf4f3753..6057e928115 100644 --- a/torchvision/transforms/v2/_misc.py +++ b/torchvision/transforms/v2/_misc.py @@ -19,9 +19,7 @@ def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any: class Lambda(Transform): - """[BETA] Apply a user-defined function as a transform. - - .. v2betastatus:: Lambda transform + """Apply a user-defined function as a transform. This transform does not support torchscript. @@ -52,9 +50,7 @@ def extra_repr(self) -> str: class LinearTransformation(Transform): - """[BETA] Transform a tensor image or video with a square transformation matrix and a mean_vector computed offline. - - .. v2betastatus:: LinearTransformation transform + """Transform a tensor image or video with a square transformation matrix and a mean_vector computed offline. This transform does not support PIL Image.
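A usage sketch for the two bounding-box meta transforms above (coordinates and canvas size are made up):

import torch
from torchvision import tv_tensors
from torchvision.transforms import v2

boxes = tv_tensors.BoundingBoxes(
    [[50, 50, 40, 40]],      # one box as (center-x, center-y, width, height)
    format="CXCYWH",
    canvas_size=(100, 100),  # (height, width); ClampBoundingBoxes clips to this
)
boxes = v2.ConvertBoundingBoxFormat(format="XYXY")(boxes)
boxes = v2.ClampBoundingBoxes()(boxes)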
Given transformation_matrix and mean_vector, will flatten the torch.*Tensor and @@ -135,9 +131,7 @@ def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any: class Normalize(Transform): - """[BETA] Normalize a tensor image or video with mean and standard deviation. - - .. v2betastatus:: Normalize transform + """Normalize a tensor image or video with mean and standard deviation. This transform does not support PIL Image. Given mean: ``(mean[1],...,mean[n])`` and std: ``(std[1],..,std[n])`` for ``n`` @@ -172,9 +166,7 @@ def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any: class GaussianBlur(Transform): - """[BETA] Blurs image with randomly chosen Gaussian blur. - - .. v2betastatus:: GausssianBlur transform + """Blurs the image with a randomly chosen Gaussian blur. If the input is a Tensor, it is expected to have [..., C, H, W] shape, where ... means an arbitrary number of leading dimensions. @@ -212,9 +204,7 @@ def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any: class ToDtype(Transform): - """[BETA] Converts the input to a specific dtype, optionally scaling the values for images or videos. - - .. v2betastatus:: ToDtype transform + """Converts the input to a specific dtype, optionally scaling the values for images or videos. .. note:: ``ToDtype(dtype, scale=True)`` is the recommended replacement for ``ConvertImageDtype(dtype)``. @@ -286,12 +276,10 @@ def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any: class ConvertImageDtype(Transform): - """[BETA] [DEPRECATED] Use ``v2.ToDtype(dtype, scale=True)`` instead. + """[DEPRECATED] Use ``v2.ToDtype(dtype, scale=True)`` instead. Convert input image to the given ``dtype`` and scale the values accordingly. - .. v2betastatus:: ConvertImageDtype transform - .. warning:: Consider using ``ToDtype(dtype, scale=True)`` instead. See :class:`~torchvision.transforms.v2.ToDtype`. @@ -323,9 +311,7 @@ def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any: class SanitizeBoundingBoxes(Transform): - """[BETA] Remove degenerate/invalid bounding boxes and their corresponding labels and masks. - - .. v2betastatus:: SanitizeBoundingBoxes transform + """Remove degenerate/invalid bounding boxes and their corresponding labels and masks. This transform removes bounding boxes and their associated labels/masks that: diff --git a/torchvision/transforms/v2/_temporal.py b/torchvision/transforms/v2/_temporal.py index df39cde0ecd..c59d5078d46 100644 --- a/torchvision/transforms/v2/_temporal.py +++ b/torchvision/transforms/v2/_temporal.py @@ -5,9 +5,7 @@ class UniformTemporalSubsample(Transform): - """[BETA] Uniformly subsample ``num_samples`` indices from the temporal dimension of the video. - - .. v2betastatus:: UniformTemporalSubsample transform + """Uniformly subsample ``num_samples`` indices from the temporal dimension of the video. Videos are expected to be of shape ``[..., T, C, H, W]`` where ``T`` denotes the temporal dimension. diff --git a/torchvision/transforms/v2/_type_conversion.py b/torchvision/transforms/v2/_type_conversion.py index 9888fb2a476..7c7439b1d02 100644 --- a/torchvision/transforms/v2/_type_conversion.py +++ b/torchvision/transforms/v2/_type_conversion.py @@ -11,9 +11,7 @@ class PILToTensor(Transform): - """[BETA] Convert a PIL Image to a tensor of the same type - this does not scale values. - - .. v2betastatus:: PILToTensor transform + """Convert a PIL Image to a tensor of the same type - this does not scale values. This transform does not support torchscript.
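RandomIoUCrop (above) requires bounding boxes in the input and, per its warning, should be followed by SanitizeBoundingBoxes. A detection-style sketch with made-up data; labels_getter=None because this toy input carries no labels:

import torch
from torchvision import tv_tensors
from torchvision.transforms import v2

img = tv_tensors.Image(torch.randint(0, 256, (3, 480, 640), dtype=torch.uint8))
boxes = tv_tensors.BoundingBoxes(
    [[10, 10, 100, 100], [200, 150, 350, 300]],
    format="XYXY",
    canvas_size=(480, 640),
)
transform = v2.Compose([v2.RandomIoUCrop(), v2.SanitizeBoundingBoxes(labels_getter=None)])
img, boxes = transform(img, boxes)  # boxes left degenerate by the crop are removed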
@@ -27,11 +25,9 @@ def _transform(self, inpt: PIL.Image.Image, params: Dict[str, Any]) -> torch.Ten class ToImage(Transform): - """[BETA] Convert a tensor, ndarray, or PIL Image to :class:`~torchvision.tv_tensors.Image` + """Convert a tensor, ndarray, or PIL Image to :class:`~torchvision.tv_tensors.Image` ; this does not scale values. - .. v2betastatus:: ToImage transform - This transform does not support torchscript. """ @@ -44,9 +40,7 @@ def _transform( class ToPILImage(Transform): - """[BETA] Convert a tensor or an ndarray to PIL Image - - .. v2betastatus:: ToPILImage transform + """Convert a tensor or an ndarray to PIL Image. This transform does not support torchscript. @@ -79,9 +73,7 @@ def _transform( class ToPureTensor(Transform): - """[BETA] Convert all TVTensors to pure tensors, removing associated metadata (if any). - - .. v2betastatus:: ToPureTensor transform + """Convert all TVTensors to pure tensors, removing associated metadata (if any). This doesn't scale or change the values, only the type. """ diff --git a/torchvision/transforms/v2/functional/_augment.py b/torchvision/transforms/v2/functional/_augment.py index c9b1f3951fe..78d4c354160 100644 --- a/torchvision/transforms/v2/functional/_augment.py +++ b/torchvision/transforms/v2/functional/_augment.py @@ -17,7 +17,7 @@ def erase( v: torch.Tensor, inplace: bool = False, ) -> torch.Tensor: - """[BETA] See :class:`~torchvision.transforms.v2.RandomErase` for details.""" + """See :class:`~torchvision.transforms.v2.RandomErasing` for details.""" if torch.jit.is_scripting(): return erase_image(inpt, i=i, j=j, h=h, w=w, v=v, inplace=inplace) diff --git a/torchvision/transforms/v2/functional/_color.py b/torchvision/transforms/v2/functional/_color.py index 33ac7bc3b01..b0189fd95ef 100644 --- a/torchvision/transforms/v2/functional/_color.py +++ b/torchvision/transforms/v2/functional/_color.py @@ -15,7 +15,7 @@ def rgb_to_grayscale(inpt: torch.Tensor, num_output_channels: int = 1) -> torch.Tensor: - """[BETA] See :class:`~torchvision.transforms.v2.Grayscale` for details.""" + """See :class:`~torchvision.transforms.v2.Grayscale` for details.""" if torch.jit.is_scripting(): return rgb_to_grayscale_image(inpt, num_output_channels=num_output_channels) @@ -147,7 +147,7 @@ def adjust_saturation_video(video: torch.Tensor, saturation_factor: float) -> to def adjust_contrast(inpt: torch.Tensor, contrast_factor: float) -> torch.Tensor: - """[BETA] See :class:`~torchvision.transforms.RandomAutocontrast`""" + """See :class:`~torchvision.transforms.v2.ColorJitter` for details.""" if torch.jit.is_scripting(): return adjust_contrast_image(inpt, contrast_factor=contrast_factor) @@ -186,7 +186,7 @@ def adjust_contrast_video(video: torch.Tensor, contrast_factor: float) -> torch.
def adjust_sharpness(inpt: torch.Tensor, sharpness_factor: float) -> torch.Tensor: - """[BETA] See :class:`~torchvision.transforms.RandomAdjustSharpness`""" + """See :class:`~torchvision.transforms.v2.RandomAdjustSharpness` for details.""" if torch.jit.is_scripting(): return adjust_sharpness_image(inpt, sharpness_factor=sharpness_factor) @@ -417,7 +417,7 @@ def adjust_gamma_video(video: torch.Tensor, gamma: float, gain: float = 1) -> to def posterize(inpt: torch.Tensor, bits: int) -> torch.Tensor: - """[BETA] See :class:`~torchvision.transforms.v2.RandomPosterize` for details.""" + """See :class:`~torchvision.transforms.v2.RandomPosterize` for details.""" if torch.jit.is_scripting(): return posterize_image(inpt, bits=bits) @@ -451,7 +451,7 @@ def posterize_video(video: torch.Tensor, bits: int) -> torch.Tensor: def solarize(inpt: torch.Tensor, threshold: float) -> torch.Tensor: - """[BETA] See :class:`~torchvision.transforms.v2.RandomSolarize` for details.""" + """See :class:`~torchvision.transforms.v2.RandomSolarize` for details.""" if torch.jit.is_scripting(): return solarize_image(inpt, threshold=threshold) @@ -479,7 +479,7 @@ def solarize_video(video: torch.Tensor, threshold: float) -> torch.Tensor: def autocontrast(inpt: torch.Tensor) -> torch.Tensor: - """[BETA] See :class:`~torchvision.transforms.v2.RandomAutocontrast` for details.""" + """See :class:`~torchvision.transforms.v2.RandomAutocontrast` for details.""" if torch.jit.is_scripting(): return autocontrast_image(inpt) @@ -529,7 +529,7 @@ def autocontrast_video(video: torch.Tensor) -> torch.Tensor: def equalize(inpt: torch.Tensor) -> torch.Tensor: - """[BETA] See :class:`~torchvision.transforms.v2.RandomEqualize` for details.""" + """See :class:`~torchvision.transforms.v2.RandomEqualize` for details.""" if torch.jit.is_scripting(): return equalize_image(inpt) @@ -619,7 +619,7 @@ def equalize_video(video: torch.Tensor) -> torch.Tensor: def invert(inpt: torch.Tensor) -> torch.Tensor: - """[BETA] See :func:`~torchvision.transforms.v2.RandomInvert`.""" + """See :class:`~torchvision.transforms.v2.RandomInvert` for details.""" if torch.jit.is_scripting(): return invert_image(inpt) diff --git a/torchvision/transforms/v2/functional/_deprecated.py b/torchvision/transforms/v2/functional/_deprecated.py index 37b027c72bc..116ea31587a 100644 --- a/torchvision/transforms/v2/functional/_deprecated.py +++ b/torchvision/transforms/v2/functional/_deprecated.py @@ -8,7 +8,7 @@ @torch.jit.unused def to_tensor(inpt: Any) -> torch.Tensor: - """[BETA] [DEPREACTED] Use to_image() and to_dtype() instead.""" + """[DEPRECATED] Use to_image() and to_dtype() instead.""" warnings.warn( "The function `to_tensor(...)` is deprecated and will be removed in a future release. " "Instead, please use `to_image(...)` followed by `to_dtype(..., dtype=torch.float32, scale=True)`."
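The to_tensor deprecation message above spells out its functional replacement; a sketch, with an extra kernel call to show that the functionals compose like ordinary tensor ops (the blank image is a placeholder input):

import PIL.Image
import torch
from torchvision.transforms.v2 import functional as F

pil_img = PIL.Image.new("RGB", (64, 64))                # placeholder input
img = F.to_image(pil_img)                               # -> tv_tensors.Image, dtype uint8
img = F.to_dtype(img, dtype=torch.float32, scale=True)  # -> float32 in [0.0, 1.0]
img = F.invert(img)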
diff --git a/torchvision/transforms/v2/functional/_geometry.py b/torchvision/transforms/v2/functional/_geometry.py index d6d42344fcb..d5ceb1aec9d 100644 --- a/torchvision/transforms/v2/functional/_geometry.py +++ b/torchvision/transforms/v2/functional/_geometry.py @@ -39,7 +39,7 @@ def _check_interpolation(interpolation: Union[InterpolationMode, int]) -> Interp def horizontal_flip(inpt: torch.Tensor) -> torch.Tensor: - """[BETA] See :class:`~torchvision.transforms.v2.RandomHorizontalFlip` for details.""" + """See :class:`~torchvision.transforms.v2.RandomHorizontalFlip` for details.""" if torch.jit.is_scripting(): return horizontal_flip_image(inpt) @@ -96,7 +96,7 @@ def horizontal_flip_video(video: torch.Tensor) -> torch.Tensor: def vertical_flip(inpt: torch.Tensor) -> torch.Tensor: - """[BETA] See :class:`~torchvision.transforms.v2.RandomVerticalFlip` for details.""" + """See :class:`~torchvision.transforms.v2.RandomVerticalFlip` for details.""" if torch.jit.is_scripting(): return vertical_flip_image(inpt) @@ -178,7 +178,7 @@ def resize( max_size: Optional[int] = None, antialias: Optional[bool] = True, ) -> torch.Tensor: - """[BETA] See :class:`~torchvision.transforms.v2.Resize` for details.""" + """See :class:`~torchvision.transforms.v2.Resize` for details.""" if torch.jit.is_scripting(): return resize_image(inpt, size=size, interpolation=interpolation, max_size=max_size, antialias=antialias) @@ -373,7 +373,7 @@ def affine( fill: _FillTypeJIT = None, center: Optional[List[float]] = None, ) -> torch.Tensor: - """[BETA] See :class:`~torchvision.transforms.v2.RandomAffine` for details.""" + """See :class:`~torchvision.transforms.v2.RandomAffine` for details.""" if torch.jit.is_scripting(): return affine_image( inpt, @@ -939,7 +939,7 @@ def rotate( center: Optional[List[float]] = None, fill: _FillTypeJIT = None, ) -> torch.Tensor: - """[BETA] See :class:`~torchvision.transforms.v2.RandomRotation` for details.""" + """See :class:`~torchvision.transforms.v2.RandomRotation` for details.""" if torch.jit.is_scripting(): return rotate_image(inpt, angle=angle, interpolation=interpolation, expand=expand, fill=fill, center=center) @@ -1094,7 +1094,7 @@ def pad( fill: Optional[Union[int, float, List[float]]] = None, padding_mode: str = "constant", ) -> torch.Tensor: - """[BETA] See :class:`~torchvision.transforms.v2.Pad` for details.""" + """See :class:`~torchvision.transforms.v2.Pad` for details.""" if torch.jit.is_scripting(): return pad_image(inpt, padding=padding, fill=fill, padding_mode=padding_mode) @@ -1314,7 +1314,7 @@ def pad_video( def crop(inpt: torch.Tensor, top: int, left: int, height: int, width: int) -> torch.Tensor: - """[BETA] See :class:`~torchvision.transforms.v2.RandomCrop` for details.""" + """See :class:`~torchvision.transforms.v2.RandomCrop` for details.""" if torch.jit.is_scripting(): return crop_image(inpt, top=top, left=left, height=height, width=width) @@ -1408,7 +1408,7 @@ def perspective( fill: _FillTypeJIT = None, coefficients: Optional[List[float]] = None, ) -> torch.Tensor: - """[BETA] See :class:`~torchvision.transforms.v2.RandomPerspective` for details.""" + """See :class:`~torchvision.transforms.v2.RandomPerspective` for details.""" if torch.jit.is_scripting(): return perspective_image( inpt, @@ -1696,7 +1696,7 @@ def elastic( interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR, fill: _FillTypeJIT = None, ) -> torch.Tensor: - """[BETA] See :class:`~torchvision.transforms.v2.ElasticTransform` for details.""" + """See 
:class:`~torchvision.transforms.v2.ElasticTransform` for details.""" if torch.jit.is_scripting(): return elastic_image(inpt, displacement=displacement, interpolation=interpolation, fill=fill) @@ -1880,7 +1880,7 @@ def elastic_video( def center_crop(inpt: torch.Tensor, output_size: List[int]) -> torch.Tensor: - """[BETA] See :class:`~torchvision.transforms.v2.RandomCrop` for details.""" + """See :class:`~torchvision.transforms.v2.CenterCrop` for details.""" if torch.jit.is_scripting(): return center_crop_image(inpt, output_size=output_size) @@ -2009,7 +2009,7 @@ def resized_crop( interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR, antialias: Optional[bool] = True, ) -> torch.Tensor: - """[BETA] See :class:`~torchvision.transforms.v2.RandomResizedCrop` for details.""" + """See :class:`~torchvision.transforms.v2.RandomResizedCrop` for details.""" if torch.jit.is_scripting(): return resized_crop_image( inpt, @@ -2154,7 +2154,7 @@ def resized_crop_video( def five_crop( inpt: torch.Tensor, size: List[int] ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: - """[BETA] See :class:`~torchvision.transforms.v2.FiveCrop` for details.""" + """See :class:`~torchvision.transforms.v2.FiveCrop` for details.""" if torch.jit.is_scripting(): return five_crop_image(inpt, size=size) @@ -2238,7 +2238,7 @@ def ten_crop( torch.Tensor, torch.Tensor, ]: - """[BETA] See :class:`~torchvision.transforms.v2.TenCrop` for details.""" + """See :class:`~torchvision.transforms.v2.TenCrop` for details.""" if torch.jit.is_scripting(): return ten_crop_image(inpt, size=size, vertical_flip=vertical_flip) diff --git a/torchvision/transforms/v2/functional/_meta.py b/torchvision/transforms/v2/functional/_meta.py index 5e045391630..e27aa18fc60 100644 --- a/torchvision/transforms/v2/functional/_meta.py +++ b/torchvision/transforms/v2/functional/_meta.py @@ -203,7 +203,7 @@ def convert_bounding_box_format( new_format: Optional[BoundingBoxFormat] = None, inplace: bool = False, ) -> torch.Tensor: - """[BETA] See :func:`~torchvision.transforms.v2.ConvertBoundingBoxFormat` for details.""" + """See :class:`~torchvision.transforms.v2.ConvertBoundingBoxFormat` for details.""" # This being a kernel / functional hybrid, we need an option to pass `old_format` explicitly for pure tensor # inputs as well as extract it from `tv_tensors.BoundingBoxes` inputs. However, putting a default value on # `old_format` means we also need to put one on `new_format` to have syntactically correct Python.
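The comment above explains the kernel/functional hybrid; a sketch of both calling conventions (box values are made up):

import torch
from torchvision import tv_tensors
from torchvision.transforms.v2 import functional as F

# tv_tensors input: the old format is read from the box metadata
boxes = tv_tensors.BoundingBoxes([[10, 10, 50, 50]], format="XYXY", canvas_size=(100, 100))
out = F.convert_bounding_box_format(boxes, new_format=tv_tensors.BoundingBoxFormat.XYWH)

# pure tensor input: old_format must be passed explicitly
plain = torch.tensor([[10, 10, 50, 50]])
out = F.convert_bounding_box_format(
    plain, old_format=tv_tensors.BoundingBoxFormat.XYXY, new_format=tv_tensors.BoundingBoxFormat.XYWH
)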
Here we mimic the @@ -254,7 +254,7 @@ def clamp_bounding_boxes( format: Optional[BoundingBoxFormat] = None, canvas_size: Optional[Tuple[int, int]] = None, ) -> torch.Tensor: - """[BETA] See :func:`~torchvision.transforms.v2.ClampBoundingBoxes` for details.""" + """See :class:`~torchvision.transforms.v2.ClampBoundingBoxes` for details.""" if not torch.jit.is_scripting(): _log_api_usage_once(clamp_bounding_boxes) diff --git a/torchvision/transforms/v2/functional/_misc.py b/torchvision/transforms/v2/functional/_misc.py index 2a6468bb46a..6117aa33ea4 100644 --- a/torchvision/transforms/v2/functional/_misc.py +++ b/torchvision/transforms/v2/functional/_misc.py @@ -20,7 +20,7 @@ def normalize( std: List[float], inplace: bool = False, ) -> torch.Tensor: - """[BETA] See :class:`~torchvision.transforms.v2.Normalize` for details.""" + """See :class:`~torchvision.transforms.v2.Normalize` for details.""" if torch.jit.is_scripting(): return normalize_image(inpt, mean=mean, std=std, inplace=inplace) @@ -71,7 +71,7 @@ def normalize_video(video: torch.Tensor, mean: List[float], std: List[float], in def gaussian_blur(inpt: torch.Tensor, kernel_size: List[int], sigma: Optional[List[float]] = None) -> torch.Tensor: - """[BETA] See :class:`~torchvision.transforms.v2.GaussianBlur` for details.""" + """See :class:`~torchvision.transforms.v2.GaussianBlur` for details.""" if torch.jit.is_scripting(): return gaussian_blur_image(inpt, kernel_size=kernel_size, sigma=sigma) @@ -180,7 +180,7 @@ def gaussian_blur_video( def to_dtype(inpt: torch.Tensor, dtype: torch.dtype = torch.float, scale: bool = False) -> torch.Tensor: - """[BETA] See :func:`~torchvision.transforms.v2.ToDtype` for details.""" + """See :class:`~torchvision.transforms.v2.ToDtype` for details.""" if torch.jit.is_scripting(): return to_dtype_image(inpt, dtype=dtype, scale=scale) @@ -261,7 +261,7 @@ def to_dtype_image(image: torch.Tensor, dtype: torch.dtype = torch.float, scale: # We encourage users to use to_dtype() instead but we keep this for BC def convert_image_dtype(image: torch.Tensor, dtype: torch.dtype = torch.float32) -> torch.Tensor: - """[BETA] [DEPRECATED] Use to_dtype() instead.""" + """[DEPRECATED] Use to_dtype() instead.""" return to_dtype_image(image, dtype=dtype, scale=True) diff --git a/torchvision/transforms/v2/functional/_temporal.py b/torchvision/transforms/v2/functional/_temporal.py index ca2903bbc68..f932b06a295 100644 --- a/torchvision/transforms/v2/functional/_temporal.py +++ b/torchvision/transforms/v2/functional/_temporal.py @@ -8,7 +8,7 @@ def uniform_temporal_subsample(inpt: torch.Tensor, num_samples: int) -> torch.Tensor: - """[BETA] See :class:`~torchvision.transforms.v2.UniformTemporalSubsample` for details.""" + """See :class:`~torchvision.transforms.v2.UniformTemporalSubsample` for details.""" if torch.jit.is_scripting(): return uniform_temporal_subsample_video(inpt, num_samples=num_samples) diff --git a/torchvision/transforms/v2/functional/_type_conversion.py b/torchvision/transforms/v2/functional/_type_conversion.py index 02aeda83df3..9ac357315b2 100644 --- a/torchvision/transforms/v2/functional/_type_conversion.py +++ b/torchvision/transforms/v2/functional/_type_conversion.py @@ -9,7 +9,7 @@ @torch.jit.unused def to_image(inpt: Union[torch.Tensor, PIL.Image.Image, np.ndarray]) -> tv_tensors.Image: - """[BETA] See :class:`~torchvision.transforms.v2.ToImage` for details.""" + """See :class:`~torchvision.transforms.v2.ToImage` for details.""" if isinstance(inpt, np.ndarray): output = torch.from_numpy(inpt).permute((2, 0,
1)).contiguous() elif isinstance(inpt, PIL.Image.Image): diff --git a/torchvision/transforms/v2/functional/_utils.py b/torchvision/transforms/v2/functional/_utils.py index 771833cbea1..fe0faeddc1b 100644 --- a/torchvision/transforms/v2/functional/_utils.py +++ b/torchvision/transforms/v2/functional/_utils.py @@ -67,7 +67,7 @@ def _name_to_functional(name): def register_kernel(functional, tv_tensor_cls): - """[BETA] Decorate a kernel to register it for a functional and a (custom) tv_tensor type. + """Decorate a kernel to register it for a functional and a (custom) tv_tensor type. See :ref:`sphx_glr_auto_examples_transforms_plot_custom_tv_tensors.py` for usage details. diff --git a/torchvision/tv_tensors/__init__.py b/torchvision/tv_tensors/__init__.py index 441502df2f0..1ba47f60a36 100644 --- a/torchvision/tv_tensors/__init__.py +++ b/torchvision/tv_tensors/__init__.py @@ -13,7 +13,7 @@ # Until `disable` is removed, there will be graph breaks after all calls to functional transforms @torch.compiler.disable def wrap(wrappee, *, like, **kwargs): - """[BETA] Convert a :class:`torch.Tensor` (``wrappee``) into the same :class:`~torchvision.tv_tensors.TVTensor` subclass as ``like``. + """Convert a :class:`torch.Tensor` (``wrappee``) into the same :class:`~torchvision.tv_tensors.TVTensor` subclass as ``like``. If ``like`` is a :class:`~torchvision.tv_tensors.BoundingBoxes`, the ``format`` and ``canvas_size`` of ``like`` are assigned to ``wrappee``, unless they are passed as ``kwargs``. diff --git a/torchvision/tv_tensors/_bounding_boxes.py b/torchvision/tv_tensors/_bounding_boxes.py index ce617ce47dc..56e77c2a85e 100644 --- a/torchvision/tv_tensors/_bounding_boxes.py +++ b/torchvision/tv_tensors/_bounding_boxes.py @@ -10,7 +10,7 @@ class BoundingBoxFormat(Enum): - """[BETA] Coordinate format of a bounding box. + """Coordinate format of a bounding box. Available formats are @@ -25,7 +25,7 @@ class BoundingBoxFormat(Enum): class BoundingBoxes(TVTensor): - """[BETA] :class:`torch.Tensor` subclass for bounding boxes. + """:class:`torch.Tensor` subclass for bounding boxes. .. note:: There should be only one :class:`~torchvision.tv_tensors.BoundingBoxes` diff --git a/torchvision/tv_tensors/_dataset_wrapper.py b/torchvision/tv_tensors/_dataset_wrapper.py index 04c3bf7133d..b95978cbfcb 100644 --- a/torchvision/tv_tensors/_dataset_wrapper.py +++ b/torchvision/tv_tensors/_dataset_wrapper.py @@ -17,9 +17,7 @@ def wrap_dataset_for_transforms_v2(dataset, target_keys=None): - """[BETA] Wrap a ``torchvision.dataset`` for usage with :mod:`torchvision.transforms.v2`. - - .. v2betastatus:: wrap_dataset_for_transforms_v2 function + """Wrap a ``torchvision.dataset`` for usage with :mod:`torchvision.transforms.v2`. Example: >>> dataset = torchvision.datasets.CocoDetection(...) diff --git a/torchvision/tv_tensors/_image.py b/torchvision/tv_tensors/_image.py index a785e4b3e7e..c2f82c8d0df 100644 --- a/torchvision/tv_tensors/_image.py +++ b/torchvision/tv_tensors/_image.py @@ -9,7 +9,7 @@ class Image(TVTensor): - """[BETA] :class:`torch.Tensor` subclass for images. + """:class:`torch.Tensor` subclass for images. .. note:: diff --git a/torchvision/tv_tensors/_mask.py b/torchvision/tv_tensors/_mask.py index 553fc581c50..a8f6f4d62cb 100644 --- a/torchvision/tv_tensors/_mask.py +++ b/torchvision/tv_tensors/_mask.py @@ -9,7 +9,7 @@ class Mask(TVTensor): - """[BETA] :class:`torch.Tensor` subclass for segmentation and detection masks. + """:class:`torch.Tensor` subclass for segmentation and detection masks. 
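register_kernel and wrap (both above) are the extension points for custom TVTensors; a minimal sketch, where the subclass and kernel are hypothetical:

import torch
from torchvision import tv_tensors
from torchvision.transforms.v2 import functional as F

class MyTVTensor(tv_tensors.TVTensor):  # hypothetical custom subclass
    pass

@F.register_kernel(functional=F.horizontal_flip, tv_tensor_cls=MyTVTensor)
def my_horizontal_flip(inpt):
    out = inpt.flip(-1)                     # plain tensor ops drop the subclass...
    return tv_tensors.wrap(out, like=inpt)  # ...wrap() restores it, as documented above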
Args: data (tensor-like, PIL.Image.Image): Any data that can be turned into a tensor with :func:`torch.as_tensor` as diff --git a/torchvision/tv_tensors/_torch_function_helpers.py b/torchvision/tv_tensors/_torch_function_helpers.py index 7edc471b110..e6ea5fddf35 100644 --- a/torchvision/tv_tensors/_torch_function_helpers.py +++ b/torchvision/tv_tensors/_torch_function_helpers.py @@ -16,7 +16,7 @@ def __exit__(self, *args): def set_return_type(return_type: str): - """[BETA] Set the return type of torch operations on :class:`~torchvision.tv_tensors.TVTensor`. + """Set the return type of torch operations on :class:`~torchvision.tv_tensors.TVTensor`. This only affects the behaviour of torch operations. It has no effect on ``torchvision`` transforms or functionals, which will always return as diff --git a/torchvision/tv_tensors/_tv_tensor.py b/torchvision/tv_tensors/_tv_tensor.py index 0c6af95af87..508e73724be 100644 --- a/torchvision/tv_tensors/_tv_tensor.py +++ b/torchvision/tv_tensors/_tv_tensor.py @@ -13,7 +13,7 @@ class TVTensor(torch.Tensor): - """[Beta] Base class for all TVTensors. + """Base class for all TVTensors. You probably don't want to use this class unless you're defining your own custom TVTensors. See diff --git a/torchvision/tv_tensors/_video.py b/torchvision/tv_tensors/_video.py index a1efe4fe406..a0466b001ee 100644 --- a/torchvision/tv_tensors/_video.py +++ b/torchvision/tv_tensors/_video.py @@ -8,7 +8,7 @@ class Video(TVTensor): - """[BETA] :class:`torch.Tensor` subclass for videos. + """:class:`torch.Tensor` subclass for videos. Args: data (tensor-like): Any data that can be turned into a tensor with :func:`torch.as_tensor`.
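A quick sketch of the set_return_type behavior described above:

import torch
from torchvision import tv_tensors

img = tv_tensors.Image(torch.rand(3, 5, 5))
assert type(img + 0) is torch.Tensor      # default ("Tensor"): torch ops return pure tensors

tv_tensors.set_return_type("TVTensor")
assert type(img + 0) is tv_tensors.Image  # now torch ops preserve the subclass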