
remove functionality scheduled for 0.15 after deprecation #7176

Merged
merged 3 commits on Feb 7, 2023
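Most of the test deletions below cover the warning raised since 0.13 when `interpolation` was passed as an int. A minimal migration sketch, assuming the `InterpolationMode` enum that the removed warning text points to:

```python
import torch
from torchvision.transforms import InterpolationMode
from torchvision.transforms import functional as F

img = torch.randint(0, 256, (3, 26, 26), dtype=torch.uint8)

# Removed in 0.15: integer interpolation codes such as interpolation=2
# out = F.rotate(img, 45, interpolation=2)

# Supported going forward: pass the enum explicitly
out = F.rotate(img, 45, interpolation=InterpolationMode.BILINEAR)
```

The same substitution applies to `F.affine`, `F.perspective`, and `F.resize`, whose int-based warning tests are deleted in this diff.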
61 changes: 0 additions & 61 deletions test/test_functional_tensor.py
@@ -2,7 +2,6 @@
import itertools
import math
import os
import re
from functools import partial
from typing import Sequence

@@ -144,20 +143,6 @@ def test_rotate_batch(self, device, dt):
center = (20, 22)
_test_fn_on_batch(batch_tensors, F.rotate, angle=32, interpolation=NEAREST, expand=True, center=center)

def test_rotate_interpolation_type(self):
tensor, _ = _create_data(26, 26)
# assert changed type warning
with pytest.warns(
UserWarning,
match=re.escape(
"Argument 'interpolation' of type int is deprecated since 0.13 and will be removed in 0.15. "
"Please use InterpolationMode enum."
),
):
res1 = F.rotate(tensor, 45, interpolation=2)
res2 = F.rotate(tensor, 45, interpolation=BILINEAR)
assert_equal(res1, res2)


class TestAffine:

@@ -364,22 +349,6 @@ def test_batches(self, device, dt):

_test_fn_on_batch(batch_tensors, F.affine, angle=-43, translate=[-3, 4], scale=1.2, shear=[4.0, 5.0])

@pytest.mark.parametrize("device", cpu_and_gpu())
def test_warnings(self, device):
tensor, pil_img = _create_data(26, 26, device=device)

# assert changed type warning
with pytest.warns(
UserWarning,
match=re.escape(
"Argument 'interpolation' of type int is deprecated since 0.13 and will be removed in 0.15. "
"Please use InterpolationMode enum."
),
):
res1 = F.affine(tensor, 45, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], interpolation=2)
res2 = F.affine(tensor, 45, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], interpolation=BILINEAR)
assert_equal(res1, res2)


def _get_data_dims_and_points_for_perspective():
# Ideally we would parametrize independently over data dims and points, but
@@ -478,23 +447,6 @@ def test_perspective_batch(device, dims_and_points, dt):
)


def test_perspective_interpolation_warning():
# assert changed type warning
spoints = [[0, 0], [33, 0], [33, 25], [0, 25]]
epoints = [[3, 2], [32, 3], [30, 24], [2, 25]]
tensor = torch.randint(0, 256, (3, 26, 26))
with pytest.warns(
UserWarning,
match=re.escape(
"Argument 'interpolation' of type int is deprecated since 0.13 and will be removed in 0.15. "
"Please use InterpolationMode enum."
),
):
res1 = F.perspective(tensor, startpoints=spoints, endpoints=epoints, interpolation=2)
res2 = F.perspective(tensor, startpoints=spoints, endpoints=epoints, interpolation=BILINEAR)
assert_equal(res1, res2)


@pytest.mark.parametrize("device", cpu_and_gpu())
@pytest.mark.parametrize("dt", [None, torch.float32, torch.float64, torch.float16])
@pytest.mark.parametrize(
@@ -568,19 +520,6 @@ def test_resize_asserts(device):

tensor, pil_img = _create_data(26, 36, device=device)

# assert changed type warning
with pytest.warns(
UserWarning,
match=re.escape(
"Argument 'interpolation' of type int is deprecated since 0.13 and will be removed in 0.15. "
"Please use InterpolationMode enum."
),
):
res1 = F.resize(tensor, size=32, interpolation=2)

res2 = F.resize(tensor, size=32, interpolation=BILINEAR)
assert_equal(res1, res2)

for img in (tensor, pil_img):
exp_msg = "max_size should only be passed if size specifies the length of the smaller edge"
with pytest.raises(ValueError, match=exp_msg):
6 changes: 0 additions & 6 deletions test/test_prototype_transforms_consistency.py
@@ -87,12 +87,6 @@ def __init__(
ArgsKwargs((32, 29)),
ArgsKwargs((31, 28), interpolation=prototype_transforms.InterpolationMode.NEAREST),
ArgsKwargs((33, 26), interpolation=prototype_transforms.InterpolationMode.BICUBIC),
# FIXME: these are currently failing, since the new transform only supports the enum. The int input is
# already deprecated and scheduled to be removed in 0.15. Should we support ints on the prototype
# transform? I guess it depends if we roll out before 0.15 or not.
# ArgsKwargs((30, 27), interpolation=0),
# ArgsKwargs((35, 29), interpolation=2),
# ArgsKwargs((34, 25), interpolation=3),
NotScriptableArgsKwargs(31, max_size=32),
ArgsKwargs([31], max_size=32),
NotScriptableArgsKwargs(30, max_size=100),
22 changes: 0 additions & 22 deletions test/test_transforms.py
@@ -1872,17 +1872,6 @@ def test_random_rotation():
# Checking if RandomRotation can be printed as string
t.__repr__()

# assert changed type warning
with pytest.warns(
UserWarning,
match=re.escape(
"Argument 'interpolation' of type int is deprecated since 0.13 and will be removed in 0.15. "
"Please use InterpolationMode enum."
),
):
t = transforms.RandomRotation((-10, 10), interpolation=2)
assert t.interpolation == transforms.InterpolationMode.BILINEAR


def test_random_rotation_error():
# assert fill being either a Sequence or a Number
@@ -2212,17 +2201,6 @@ def test_random_affine():
t = transforms.RandomAffine(10, interpolation=transforms.InterpolationMode.BILINEAR)
assert "bilinear" in t.__repr__()

# assert changed type warning
with pytest.warns(
UserWarning,
match=re.escape(
"Argument 'interpolation' of type int is deprecated since 0.13 and will be removed in 0.15. "
"Please use InterpolationMode enum."
),
):
t = transforms.RandomAffine(10, interpolation=2)
assert t.interpolation == transforms.InterpolationMode.BILINEAR


def test_elastic_transformation():
with pytest.raises(TypeError, match=r"alpha should be float or a sequence of floats"):
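The `RandomRotation` and `RandomAffine` warning tests removed above map onto the same change for the class-based transforms; a short sketch, again assuming the 0.13+ enum:

```python
from torchvision import transforms
from torchvision.transforms import InterpolationMode

# Integer codes such as interpolation=2 are no longer accepted here either.
rotate = transforms.RandomRotation((-10, 10), interpolation=InterpolationMode.BILINEAR)
affine = transforms.RandomAffine(10, interpolation=InterpolationMode.BILINEAR)
```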
13 changes: 0 additions & 13 deletions torchvision/datasets/utils.py
@@ -48,19 +48,6 @@ def _urlretrieve(url: str, filename: str, chunk_size: int = 1024 * 32) -> None:
_save_response_content(iter(lambda: response.read(chunk_size), b""), filename, length=response.length)


def gen_bar_updater() -> Callable[[int, int, int], None]:
warnings.warn("The function `gen_bar_update` is deprecated since 0.13 and will be removed in 0.15.")
pbar = tqdm(total=None)

def bar_update(count, block_size, total_size):
if pbar.total is None and total_size:
pbar.total = total_size
progress_bytes = count * block_size
pbar.update(progress_bytes - pbar.n)

return bar_update


def calculate_md5(fpath: str, chunk_size: int = 1024 * 1024) -> str:
# Setting the `usedforsecurity` flag does not change anything about the functionality, but indicates that we are
# not using the MD5 checksum for cryptography. This enables its usage in restricted environments like FIPS. Without
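Callers that still want a `reporthook`-style progress bar after `gen_bar_updater` is removed can keep an equivalent helper in their own code. The sketch below mirrors the deleted implementation; the name `bar_updater` is hypothetical and `tqdm` is assumed to be installed:

```python
from tqdm import tqdm

def bar_updater():
    """Progress hook compatible with urllib.request.urlretrieve(reporthook=...)."""
    pbar = tqdm(total=None)

    def bar_update(count, block_size, total_size):
        # Total size is only known once the first callback arrives.
        if pbar.total is None and total_size:
            pbar.total = total_size
        pbar.update(count * block_size - pbar.n)

    return bar_update
```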
11 changes: 0 additions & 11 deletions torchvision/models/alexnet.py
@@ -117,14 +117,3 @@ def alexnet(*, weights: Optional[AlexNet_Weights] = None, progress: bool = True,
model.load_state_dict(weights.get_state_dict(progress=progress))

return model


# The dictionary below is internal implementation detail and will be removed in v0.15
from ._utils import _ModelURLs


model_urls = _ModelURLs(
{
"alexnet": AlexNet_Weights.IMAGENET1K_V1.url,
}
)
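This and the following model files drop their legacy `model_urls` dictionaries. Pretrained checkpoints are addressed through the weights enums instead; a sketch, assuming the multi-weight API available since 0.13:

```python
from torchvision.models import alexnet, AlexNet_Weights

# Select pretrained weights via the enum rather than a model_urls lookup.
weights = AlexNet_Weights.IMAGENET1K_V1
model = alexnet(weights=weights)

# The underlying checkpoint URL is still available on the enum if needed.
checkpoint_url = weights.url
```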
13 changes: 0 additions & 13 deletions torchvision/models/densenet.py
@@ -446,16 +446,3 @@ def densenet201(*, weights: Optional[DenseNet201_Weights] = None, progress: bool
weights = DenseNet201_Weights.verify(weights)

return _densenet(32, (6, 12, 48, 32), 64, weights, progress, **kwargs)


# The dictionary below is internal implementation detail and will be removed in v0.15
from ._utils import _ModelURLs

model_urls = _ModelURLs(
{
"densenet121": DenseNet121_Weights.IMAGENET1K_V1.url,
"densenet169": DenseNet169_Weights.IMAGENET1K_V1.url,
"densenet201": DenseNet201_Weights.IMAGENET1K_V1.url,
"densenet161": DenseNet161_Weights.IMAGENET1K_V1.url,
}
)
13 changes: 0 additions & 13 deletions torchvision/models/detection/faster_rcnn.py
@@ -841,16 +841,3 @@ def fasterrcnn_mobilenet_v3_large_fpn(
trainable_backbone_layers=trainable_backbone_layers,
**kwargs,
)


# The dictionary below is internal implementation detail and will be removed in v0.15
from .._utils import _ModelURLs


model_urls = _ModelURLs(
{
"fasterrcnn_resnet50_fpn_coco": FasterRCNN_ResNet50_FPN_Weights.COCO_V1.url,
"fasterrcnn_mobilenet_v3_large_320_fpn_coco": FasterRCNN_MobileNet_V3_Large_320_FPN_Weights.COCO_V1.url,
"fasterrcnn_mobilenet_v3_large_fpn_coco": FasterRCNN_MobileNet_V3_Large_FPN_Weights.COCO_V1.url,
}
)
11 changes: 0 additions & 11 deletions torchvision/models/detection/fcos.py
@@ -769,14 +769,3 @@ def fcos_resnet50_fpn(
model.load_state_dict(weights.get_state_dict(progress=progress))

return model


# The dictionary below is internal implementation detail and will be removed in v0.15
from .._utils import _ModelURLs


model_urls = _ModelURLs(
{
"fcos_resnet50_fpn_coco": FCOS_ResNet50_FPN_Weights.COCO_V1.url,
}
)
13 changes: 0 additions & 13 deletions torchvision/models/detection/keypoint_rcnn.py
@@ -470,16 +470,3 @@ def keypointrcnn_resnet50_fpn(
overwrite_eps(model, 0.0)

return model


# The dictionary below is internal implementation detail and will be removed in v0.15
from .._utils import _ModelURLs


model_urls = _ModelURLs(
{
# legacy model for BC reasons, see https://github.com/pytorch/vision/issues/1606
"keypointrcnn_resnet50_fpn_coco_legacy": KeypointRCNN_ResNet50_FPN_Weights.COCO_LEGACY.url,
"keypointrcnn_resnet50_fpn_coco": KeypointRCNN_ResNet50_FPN_Weights.COCO_V1.url,
}
)
11 changes: 0 additions & 11 deletions torchvision/models/detection/mask_rcnn.py
@@ -585,14 +585,3 @@ def maskrcnn_resnet50_fpn_v2(
model.load_state_dict(weights.get_state_dict(progress=progress))

return model


# The dictionary below is internal implementation detail and will be removed in v0.15
from .._utils import _ModelURLs


model_urls = _ModelURLs(
{
"maskrcnn_resnet50_fpn_coco": MaskRCNN_ResNet50_FPN_Weights.COCO_V1.url,
}
)
11 changes: 0 additions & 11 deletions torchvision/models/detection/retinanet.py
@@ -897,14 +897,3 @@ def retinanet_resnet50_fpn_v2(
model.load_state_dict(weights.get_state_dict(progress=progress))

return model


# The dictionary below is internal implementation detail and will be removed in v0.15
from .._utils import _ModelURLs


model_urls = _ModelURLs(
{
"retinanet_resnet50_fpn_coco": RetinaNet_ResNet50_FPN_Weights.COCO_V1.url,
}
)
22 changes: 0 additions & 22 deletions torchvision/models/detection/ssd.py
@@ -680,25 +680,3 @@ def ssd300_vgg16(
model.load_state_dict(weights.get_state_dict(progress=progress))

return model


# The dictionary below is internal implementation detail and will be removed in v0.15
from .._utils import _ModelURLs


model_urls = _ModelURLs(
{
"ssd300_vgg16_coco": SSD300_VGG16_Weights.COCO_V1.url,
}
)


backbone_urls = _ModelURLs(
{
# We port the features of a VGG16 backbone trained by amdegroot because unlike the one on TorchVision, it uses
# the same input standardization method as the paper.
# Ref: https://s3.amazonaws.com/amdegroot-models/vgg16_reducedfc.pth
# Only the `features` weights have proper values, those on the `classifier` module are filled with nans.
"vgg16_features": VGG16_Weights.IMAGENET1K_FEATURES.url,
}
)
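The removed `backbone_urls` entry pointed at the VGG16 feature weights used by the SSD backbone; those remain reachable through the weights enum named in the deleted comment. A sketch (and, per that comment, only the `features` module of this checkpoint carries meaningful values):

```python
from torchvision.models import vgg16, VGG16_Weights

# Load the feature-extractor weights referenced by the removed backbone_urls dict.
backbone = vgg16(weights=VGG16_Weights.IMAGENET1K_FEATURES).features
```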
11 changes: 0 additions & 11 deletions torchvision/models/detection/ssdlite.py
@@ -329,14 +329,3 @@ def ssdlite320_mobilenet_v3_large(
model.load_state_dict(weights.get_state_dict(progress=progress))

return model


# The dictionary below is internal implementation detail and will be removed in v0.15
from .._utils import _ModelURLs


model_urls = _ModelURLs(
{
"ssdlite320_mobilenet_v3_large_coco": SSDLite320_MobileNet_V3_Large_Weights.COCO_V1.url,
}
)
30 changes: 0 additions & 30 deletions torchvision/models/efficientnet.py
@@ -1,6 +1,5 @@
import copy
import math
import warnings
from dataclasses import dataclass
from functools import partial
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
@@ -239,7 +238,6 @@ def __init__(
num_classes: int = 1000,
norm_layer: Optional[Callable[..., nn.Module]] = None,
last_channel: Optional[int] = None,
**kwargs: Any,
) -> None:
"""
EfficientNet V1 and V2 main class
@@ -263,16 +261,6 @@
):
raise TypeError("The inverted_residual_setting should be List[MBConvConfig]")

if "block" in kwargs:
warnings.warn(
"The parameter 'block' is deprecated since 0.13 and will be removed 0.15. "
"Please pass this information on 'MBConvConfig.block' instead."
)
if kwargs["block"] is not None:
for s in inverted_residual_setting:
if isinstance(s, MBConvConfig):
s.block = kwargs["block"]

if norm_layer is None:
norm_layer = nn.BatchNorm2d

@@ -1141,21 +1129,3 @@ def efficientnet_v2_l(
norm_layer=partial(nn.BatchNorm2d, eps=1e-03),
**kwargs,
)


# The dictionary below is internal implementation detail and will be removed in v0.15
from ._utils import _ModelURLs


model_urls = _ModelURLs(
{
"efficientnet_b0": EfficientNet_B0_Weights.IMAGENET1K_V1.url,
"efficientnet_b1": EfficientNet_B1_Weights.IMAGENET1K_V1.url,
"efficientnet_b2": EfficientNet_B2_Weights.IMAGENET1K_V1.url,
"efficientnet_b3": EfficientNet_B3_Weights.IMAGENET1K_V1.url,
"efficientnet_b4": EfficientNet_B4_Weights.IMAGENET1K_V1.url,
"efficientnet_b5": EfficientNet_B5_Weights.IMAGENET1K_V1.url,
"efficientnet_b6": EfficientNet_B6_Weights.IMAGENET1K_V1.url,
"efficientnet_b7": EfficientNet_B7_Weights.IMAGENET1K_V1.url,
}
)
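The removed `block` kwarg on `EfficientNet.__init__` has its replacement named in the deleted warning: carry the block on each `MBConvConfig` entry instead. A sketch with illustrative stage settings (not the real per-variant tables), assuming a custom block is wanted:

```python
from torchvision.models.efficientnet import EfficientNet, MBConv, MBConvConfig

# Illustrative inverted-residual settings; real models use the full per-variant tables.
setting = [
    MBConvConfig(1.0, 3, 1, 32, 16, 1),
    MBConvConfig(6.0, 3, 2, 16, 24, 2),
]
for cfg in setting:
    cfg.block = MBConv  # or a custom MBConv-compatible block, replacing the old kwarg

model = EfficientNet(setting, dropout=0.2)
```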
12 changes: 0 additions & 12 deletions torchvision/models/googlenet.py
@@ -343,15 +343,3 @@ def googlenet(*, weights: Optional[GoogLeNet_Weights] = None, progress: bool = T
)

return model


# The dictionary below is internal implementation detail and will be removed in v0.15
from ._utils import _ModelURLs


model_urls = _ModelURLs(
{
# GoogLeNet ported from TensorFlow
"googlenet": GoogLeNet_Weights.IMAGENET1K_V1.url,
}
)