From 22f76cbe7d1761dbc5316f9d61f1b814b83b0fb2 Mon Sep 17 00:00:00 2001
From: Philip Meier
Date: Mon, 19 Jun 2023 21:19:51 +0200
Subject: [PATCH 1/2] upgrade linters

---
 .pre-commit-config.yaml | 12 +--
 docs/source/conf.py | 3 -
 references/depth/stereo/cascade_evaluation.py | 1 -
 references/depth/stereo/presets.py | 1 -
 references/depth/stereo/transforms.py | 8 --
 references/detection/transforms.py | 4 +-
 references/optical_flow/train.py | 1 -
 references/optical_flow/transforms.py | 2 -
 references/optical_flow/utils.py | 1 -
 test/test_datasets.py | 2 -
 test/test_extended_models.py | 1 -
 test/test_functional_tensor.py | 12 ---
 test/test_image.py | 3 -
 test/test_models.py | 3 -
 .../test_models_detection_negative_samples.py | 1 -
 test/test_onnx.py | 1 -
 test/test_ops.py | 2 +-
 test/test_transforms.py | 5 -
 test/test_transforms_tensor.py | 2 -
 test/test_transforms_v2.py | 7 --
 test/test_transforms_v2_consistency.py | 2 -
 test/test_transforms_v2_functional.py | 1 -
 test/test_videoapi.py | 1 -
 test/transforms_v2_kernel_infos.py | 2 -
 torchvision/_internally_replaced_utils.py | 1 -
 torchvision/datasets/_optical_flow.py | 3 -
 torchvision/datasets/_stereo_matching.py | 1 -
 torchvision/datasets/caltech.py | 4 +-
 torchvision/datasets/celeba.py | 4 +-
 torchvision/datasets/cifar.py | 1 -
 torchvision/datasets/cityscapes.py | 1 -
 torchvision/datasets/eurosat.py | 1 -
 torchvision/datasets/gtsrb.py | 2 -
 torchvision/datasets/kinetics.py | 1 -
 torchvision/datasets/lfw.py | 1 -
 torchvision/datasets/sbd.py | 1 -
 torchvision/datasets/stanford_cars.py | 1 -
 torchvision/datasets/video_utils.py | 1 -
 torchvision/datasets/widerface.py | 4 +-
 torchvision/io/_video_opt.py | 1 -
 torchvision/io/video_reader.py | 2 +-
 torchvision/models/densenet.py | 1 -
 torchvision/models/detection/_utils.py | 1 -
 .../models/detection/backbone_utils.py | 1 -
 torchvision/models/detection/faster_rcnn.py | 1 -
 torchvision/models/detection/fcos.py | 2 -
 .../models/detection/generalized_rcnn.py | 10 +-
 torchvision/models/detection/keypoint_rcnn.py | 1 -
 torchvision/models/detection/mask_rcnn.py | 1 -
 torchvision/models/detection/retinanet.py | 52 +++++++----
 torchvision/models/detection/roi_heads.py | 93 +++++++++----------
 torchvision/models/detection/rpn.py | 3 -
 torchvision/models/feature_extraction.py | 1 -
 torchvision/models/maxvit.py | 1 -
 torchvision/models/optical_flow/raft.py | 1 -
 torchvision/models/video/resnet.py | 4 -
 torchvision/ops/_utils.py | 1 -
 torchvision/ops/boxes.py | 1 -
 torchvision/ops/ciou_loss.py | 1 -
 torchvision/ops/diou_loss.py | 2 -
 torchvision/ops/giou_loss.py | 1 -
 torchvision/ops/misc.py | 3 -
 .../prototype/datasets/_builtin/pcam.py | 1 -
 .../prototype/datasets/_builtin/semeion.py | 1 -
 .../datasets/_builtin/stanford_cars.py | 1 -
 .../prototype/datasets/utils/_resource.py | 1 -
 .../models/depth/stereo/crestereo.py | 4 -
 torchvision/prototype/transforms/_augment.py | 2 -
 torchvision/transforms/_functional_pil.py | 8 --
 torchvision/transforms/_functional_tensor.py | 10 --
 torchvision/transforms/transforms.py | 1 -
 torchvision/transforms/v2/_geometry.py | 1 -
 torchvision/transforms/v2/_transform.py | 1 -
 .../transforms/v2/functional/_geometry.py | 2 -
 torchvision/transforms/v2/functional/_meta.py | 1 -
 torchvision/utils.py | 6 --
 76 files changed, 95 insertions(+), 235 deletions(-)

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 343df7f1021..a73d2b28d1e 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,6 +1,6 @@
 repos:
   - repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: v4.0.1
+    rev: v4.4.0
     hooks:
       - id: check-docstring-first
       - id: check-toml
@@ -11,20 +11,20 @@ repos:
       - id: end-of-file-fixer
 
   - repo: https://github.com/omnilib/ufmt
-    rev: v1.3.3
+    rev: v2.1.0
     hooks:
       - id: ufmt
        additional_dependencies:
-          - black == 22.3.0
-          - usort == 1.0.2
+          - black == 23.3.0
+          - usort == 1.0.7
 
   - repo: https://github.com/PyCQA/flake8
-    rev: 5.0.4
+    rev: 6.0.0
     hooks:
       - id: flake8
        args: [--config=setup.cfg]
 
   - repo: https://github.com/PyCQA/pydocstyle
-    rev: 6.1.1
+    rev: 6.3.0
     hooks:
       - id: pydocstyle
diff --git a/docs/source/conf.py b/docs/source/conf.py
index 4bb75fe6eeb..5d3def3f705 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -235,7 +235,6 @@ def patched_make_field(self, types, domain, items, **kw):
     # `kw` catches `env=None` needed for newer sphinx while maintaining
     # backwards compatibility when passed along further down!
 
-    # type: (list, unicode, tuple) -> nodes.field # noqa: F821
     def handle_item(fieldarg, content):
         par = nodes.paragraph()
         par += addnodes.literal_strong("", fieldarg) # Patch: this line added
@@ -321,7 +320,6 @@ def inject_weight_metadata(app, what, name, obj, options, lines):
     """
 
     if obj.__name__.endswith(("_Weights", "_QuantizedWeights")):
-
         if len(obj) == 0:
             lines[:] = ["There are no available pre-trained weights."]
             return
@@ -468,6 +466,5 @@ def generate_weights_table(module, table_name, metrics, dataset, include_pattern
 
 
 def setup(app):
-
     app.connect("autodoc-process-docstring", inject_minigalleries)
     app.connect("autodoc-process-docstring", inject_weight_metadata)
diff --git a/references/depth/stereo/cascade_evaluation.py b/references/depth/stereo/cascade_evaluation.py
index ee506ce3985..557575792e5 100644
--- a/references/depth/stereo/cascade_evaluation.py
+++ b/references/depth/stereo/cascade_evaluation.py
@@ -203,7 +203,6 @@ def evaluate(model, loader, args, writer=None, step=None):
 
     for n_cascades in args.n_cascades:
         for n_iters in args.n_iterations:
-
             config = f"{n_cascades}c_{n_iters}i"
             config_image_folder = os.path.join(base_image_folder, config)
             os.makedirs(config_image_folder, exist_ok=True)
diff --git a/references/depth/stereo/presets.py b/references/depth/stereo/presets.py
index cadd2405178..b86f70cd331 100644
--- a/references/depth/stereo/presets.py
+++ b/references/depth/stereo/presets.py
@@ -84,7 +84,6 @@ def __init__(
         erase_px_range: Tuple[int, int] = (50, 100),
         erase_num_repeats: int = 1,
     ) -> None:
-
         if scaling_type not in ["linear", "exponential"]:
             raise ValueError(f"Unknown scaling type: {scaling_type}. 
Available types: linear, exponential") diff --git a/references/depth/stereo/transforms.py b/references/depth/stereo/transforms.py index 9c4a6bab6d3..89b36c19033 100644 --- a/references/depth/stereo/transforms.py +++ b/references/depth/stereo/transforms.py @@ -19,7 +19,6 @@ def rand_float_range(size: Sequence[int], low: float, high: float) -> Tensor: class InterpolationStrategy: - _valid_modes: List[str] = ["mixed", "bicubic", "bilinear"] def __init__(self, mode: str = "mixed") -> None: @@ -156,7 +155,6 @@ def forward( disparities: Tuple[T_FLOW, T_FLOW], masks: Tuple[T_MASK, T_MASK], ) -> Tuple[T_STEREO_TENSOR, Tuple[T_FLOW, T_FLOW], Tuple[T_MASK, T_MASK]]: - img_left = F.normalize(images[0], mean=self.mean, std=self.std) img_right = F.normalize(images[1], mean=self.mean, std=self.std) @@ -209,7 +207,6 @@ def forward( disparities: Tuple[T_FLOW, T_FLOW], masks: Tuple[T_MASK, T_MASK], ) -> Tuple[T_STEREO_TENSOR, Tuple[T_FLOW, T_FLOW], Tuple[T_MASK, T_MASK]]: - if torch.rand(1) < self.p: # asymmetric: different transform for img1 and img2 img_left = super().forward(images[0]) @@ -236,7 +233,6 @@ def forward( disparities: Tuple[T_FLOW, T_FLOW], masks: Tuple[T_MASK, T_MASK], ) -> Tuple[T_STEREO_TENSOR, Tuple[T_FLOW, T_FLOW], Tuple[T_MASK, T_MASK]]: - gamma = rand_float_range((1,), low=self.gamma_range[0], high=self.gamma_range[1]).item() if torch.rand(1) < self.p: @@ -285,7 +281,6 @@ def forward( disparities: T_STEREO_TENSOR, masks: T_STEREO_TENSOR, ) -> Tuple[T_STEREO_TENSOR, Tuple[T_FLOW, T_FLOW], Tuple[T_MASK, T_MASK]]: - if torch.rand(1) < self.p: return images, disparities, masks @@ -342,7 +337,6 @@ def forward( disparities: T_STEREO_TENSOR, masks: T_STEREO_TENSOR, ) -> Tuple[T_STEREO_TENSOR, Tuple[T_FLOW, T_FLOW], Tuple[T_MASK, T_MASK]]: - left_image, right_image = images if torch.rand(1) < self.p: @@ -421,7 +415,6 @@ def forward( disparities: Tuple[T_FLOW, T_FLOW], masks: Tuple[T_MASK, T_MASK], ) -> Tuple[T_STEREO_TENSOR, Tuple[T_FLOW, T_FLOW], Tuple[T_MASK, T_MASK]]: - img_left, img_right = images dsp_left, dsp_right = disparities mask_left, mask_right = masks @@ -522,7 +515,6 @@ def forward( disparities: Tuple[T_FLOW, T_FLOW], masks: Tuple[T_MASK, T_MASK], ) -> Tuple[T_STEREO_TENSOR, Tuple[T_FLOW, T_FLOW], Tuple[T_MASK, T_MASK]]: - img_left, img_right = images dsp_left, dsp_right = disparities mask_left, mask_right = masks diff --git a/references/detection/transforms.py b/references/detection/transforms.py index d26bf6eac85..b2d06520b36 100644 --- a/references/detection/transforms.py +++ b/references/detection/transforms.py @@ -166,8 +166,7 @@ def __init__( self.p = p @torch.jit.unused - def _get_fill_value(self, is_pil): - # type: (bool) -> int + def _get_fill_value(self, is_pil: bool) -> int: # We fake the type to make it work on JIT return tuple(int(x) for x in self.fill) if is_pil else 0 @@ -447,7 +446,6 @@ def _copy_paste( blending: bool = True, resize_interpolation: F.InterpolationMode = F.InterpolationMode.BILINEAR, ) -> Tuple[torch.Tensor, Dict[str, Tensor]]: - # Random paste targets selection: num_masks = len(paste_target["masks"]) diff --git a/references/optical_flow/train.py b/references/optical_flow/train.py index ab99cc3ae55..c5658a21a1e 100644 --- a/references/optical_flow/train.py +++ b/references/optical_flow/train.py @@ -181,7 +181,6 @@ def preprocessing(img1, img2, flow, valid_flow_mask): def train_one_epoch(model, optimizer, scheduler, train_loader, logger, args): device = torch.device(args.device) for data_blob in logger.log_every(train_loader): - 
optimizer.zero_grad() image1, image2, flow_gt, valid_flow_mask = (x.to(device) for x in data_blob) diff --git a/references/optical_flow/transforms.py b/references/optical_flow/transforms.py index bc831a2ee52..c6f7722c80d 100644 --- a/references/optical_flow/transforms.py +++ b/references/optical_flow/transforms.py @@ -6,7 +6,6 @@ class ValidateModelInput(torch.nn.Module): # Pass-through transform that checks the shape and dtypes to make sure the model gets what it expects def forward(self, img1, img2, flow, valid_flow_mask): - if not all(isinstance(arg, torch.Tensor) for arg in (img1, img2, flow, valid_flow_mask) if arg is not None): raise TypeError("This method expects all input arguments to be of type torch.Tensor.") if not all(arg.dtype == torch.float32 for arg in (img1, img2, flow) if arg is not None): @@ -90,7 +89,6 @@ def __init__(self, brightness=0, contrast=0, saturation=0, hue=0, p=0.2): self.p = p def forward(self, img1, img2, flow, valid_flow_mask): - if torch.rand(1) < self.p: # asymmetric: different transform for img1 and img2 img1 = super().forward(img1) diff --git a/references/optical_flow/utils.py b/references/optical_flow/utils.py index cd4b16eb0d8..d7208f5f0c6 100644 --- a/references/optical_flow/utils.py +++ b/references/optical_flow/utils.py @@ -155,7 +155,6 @@ def log_every(self, iterable, print_freq=5, header=None): def compute_metrics(flow_pred, flow_gt, valid_flow_mask=None): - epe = ((flow_pred - flow_gt) ** 2).sum(dim=1).sqrt() flow_norm = (flow_gt**2).sum(dim=1).sqrt() diff --git a/test/test_datasets.py b/test/test_datasets.py index ed6aa17d3f9..26d915a4c72 100644 --- a/test/test_datasets.py +++ b/test/test_datasets.py @@ -278,7 +278,6 @@ class CityScapesTestCase(datasets_utils.ImageDatasetTestCase): FEATURE_TYPES = (PIL.Image.Image, (dict, PIL.Image.Image)) def inject_fake_data(self, tmpdir, config): - tmpdir = pathlib.Path(tmpdir) mode_to_splits = { @@ -1948,7 +1947,6 @@ def inject_fake_data(self, tmpdir, config): num_examples = 2 if config["split"] == "train" else 3 for split_dir in ("training", "testing"): - datasets_utils.create_image_folder( root / split_dir, name="image_2", diff --git a/test/test_extended_models.py b/test/test_extended_models.py index 0866cc0f8a3..11e1babec7c 100644 --- a/test/test_extended_models.py +++ b/test/test_extended_models.py @@ -182,7 +182,6 @@ def test_naming_conventions(model_fn): ) @run_if_test_with_extended def test_schema_meta_validation(model_fn): - if model_fn.__name__ == "maskrcnn_resnet50_fpn_v2": pytest.skip(reason="FIXME https://github.com/pytorch/vision/issues/7349") diff --git a/test/test_functional_tensor.py b/test/test_functional_tensor.py index 0e1cc648a19..546d3a11cff 100644 --- a/test/test_functional_tensor.py +++ b/test/test_functional_tensor.py @@ -67,7 +67,6 @@ def test_scale_channel(): class TestRotate: - ALL_DTYPES = [None, torch.float32, torch.float64, torch.float16] scripted_rotate = torch.jit.script(F.rotate) IMG_W = 26 @@ -153,7 +152,6 @@ def test_rotate_interpolation_type(self): class TestAffine: - ALL_DTYPES = [None, torch.float32, torch.float64, torch.float16] scripted_affine = torch.jit.script(F.affine) @@ -407,7 +405,6 @@ def _get_data_dims_and_points_for_perspective(): ) @pytest.mark.parametrize("fn", [F.perspective, torch.jit.script(F.perspective)]) def test_perspective_pil_vs_tensor(device, dims_and_points, dt, fill, fn): - if dt == torch.float16 and device == "cpu": # skip float16 on CPU case return @@ -439,7 +436,6 @@ def test_perspective_pil_vs_tensor(device, 
dims_and_points, dt, fill, fn): @pytest.mark.parametrize("dims_and_points", _get_data_dims_and_points_for_perspective()) @pytest.mark.parametrize("dt", [None, torch.float32, torch.float64, torch.float16]) def test_perspective_batch(device, dims_and_points, dt): - if dt == torch.float16 and device == "cpu": # skip float16 on CPU case return @@ -491,7 +487,6 @@ def test_perspective_interpolation_type(): @pytest.mark.parametrize("max_size", [None, 34, 40, 1000]) @pytest.mark.parametrize("interpolation", [BILINEAR, BICUBIC, NEAREST, NEAREST_EXACT]) def test_resize(device, dt, size, max_size, interpolation): - if dt == torch.float16 and device == "cpu": # skip float16 on CPU case return @@ -541,7 +536,6 @@ def test_resize(device, dt, size, max_size, interpolation): @pytest.mark.parametrize("device", cpu_and_gpu()) def test_resize_asserts(device): - tensor, pil_img = _create_data(26, 36, device=device) res1 = F.resize(tensor, size=32, interpolation=PIL.Image.BILINEAR) @@ -561,7 +555,6 @@ def test_resize_asserts(device): @pytest.mark.parametrize("size", [[96, 72], [96, 420], [420, 72]]) @pytest.mark.parametrize("interpolation", [BILINEAR, BICUBIC]) def test_resize_antialias(device, dt, size, interpolation): - if dt == torch.float16 and device == "cpu": # skip float16 on CPU case return @@ -612,7 +605,6 @@ def test_resize_antialias(device, dt, size, interpolation): @needs_cuda @pytest.mark.parametrize("interpolation", [BILINEAR, BICUBIC]) def test_assert_resize_antialias(interpolation): - # Checks implementation on very large scales # and catch TORCH_CHECK inside PyTorch implementation torch.manual_seed(12) @@ -625,7 +617,6 @@ def test_assert_resize_antialias(interpolation): def test_resize_antialias_default_warning(): - img = torch.randint(0, 256, size=(3, 44, 56), dtype=torch.uint8) match = "The default value of the antialias" @@ -646,7 +637,6 @@ def test_resize_antialias_default_warning(): @pytest.mark.parametrize("size", [[10, 7], [10, 42], [42, 7]]) @pytest.mark.parametrize("interpolation", [BILINEAR, BICUBIC]) def test_interpolate_antialias_backward(device, dt, size, interpolation): - if dt == torch.float16 and device == "cpu": # skip float16 on CPU case return @@ -663,7 +653,6 @@ def test_interpolate_antialias_backward(device, dt, size, interpolation): def check_functional_vs_PIL_vs_scripted( fn, fn_pil, fn_t, config, device, dtype, channels=3, tol=2.0 + 1e-10, agg_method="max" ): - script_fn = torch.jit.script(fn) torch.manual_seed(15) tensor, pil_img = _create_data(26, 34, channels=channels, device=device) @@ -1100,7 +1089,6 @@ def test_crop(device, top, left, height, width): @pytest.mark.parametrize("sigma", [[0.5, 0.5], (0.5, 0.5), (0.8, 0.8), (1.7, 1.7)]) @pytest.mark.parametrize("fn", [F.gaussian_blur, torch.jit.script(F.gaussian_blur)]) def test_gaussian_blur(device, image_size, dt, ksize, sigma, fn): - # true_cv2_results = { # # np_img = np.arange(3 * 10 * 12, dtype="uint8").reshape((10, 12, 3)) # # cv2.GaussianBlur(np_img, ksize=(3, 3), sigmaX=0.8) diff --git a/test/test_image.py b/test/test_image.py index 4c210ea7eef..96f622084b2 100644 --- a/test/test_image.py +++ b/test/test_image.py @@ -78,7 +78,6 @@ def normalize_dimensions(img_pil): ], ) def test_decode_jpeg(img_path, pil_mode, mode): - with Image.open(img_path) as img: is_cmyk = img.mode == "CMYK" if pil_mode is not None: @@ -151,7 +150,6 @@ def test_damaged_corrupt_images(img_path): ], ) def test_decode_png(img_path, pil_mode, mode): - with Image.open(img_path) as img: if pil_mode is not None: img = img.convert(pil_mode) @@ 
-399,7 +397,6 @@ def test_decode_jpeg_cuda_errors(): def test_encode_jpeg_errors(): - with pytest.raises(RuntimeError, match="Input tensor dtype should be uint8"): encode_jpeg(torch.empty((3, 100, 100), dtype=torch.float32)) diff --git a/test/test_models.py b/test/test_models.py index f6eeb7c28c8..ad0ed1ffbe3 100644 --- a/test/test_models.py +++ b/test/test_models.py @@ -634,7 +634,6 @@ def checkOut(out): def test_generalizedrcnn_transform_repr(): - min_size, max_size = 224, 299 image_mean = [0.485, 0.456, 0.406] image_std = [0.229, 0.224, 0.225] @@ -1033,7 +1032,6 @@ def test_detection_model_trainable_backbone_layers(model_fn, disable_weight_load @pytest.mark.parametrize("model_fn", list_model_fns(models.optical_flow)) @pytest.mark.parametrize("scripted", (False, True)) def test_raft(model_fn, scripted): - torch.manual_seed(0) # We need very small images, otherwise the pickle size would exceed the 50KB @@ -1058,7 +1056,6 @@ def test_raft(model_fn, scripted): def test_presets_antialias(): - img = torch.randint(0, 256, size=(1, 3, 224, 224), dtype=torch.uint8) match = "The default value of the antialias parameter" diff --git a/test/test_models_detection_negative_samples.py b/test/test_models_detection_negative_samples.py index c91cfdf20a7..3baa929e5d7 100644 --- a/test/test_models_detection_negative_samples.py +++ b/test/test_models_detection_negative_samples.py @@ -51,7 +51,6 @@ def test_targets_to_anchors(self): assert matched_gt_boxes[0].dtype == torch.float32 def test_assign_targets_to_proposals(self): - proposals = [torch.randint(-50, 50, (20, 4), dtype=torch.float32)] gt_boxes = [torch.zeros((0, 4), dtype=torch.float32)] gt_labels = [torch.tensor([[0]], dtype=torch.int64)] diff --git a/test/test_onnx.py b/test/test_onnx.py index 19ed13b1a6d..ac314bc6c62 100644 --- a/test/test_onnx.py +++ b/test/test_onnx.py @@ -66,7 +66,6 @@ def run_model( self.ort_validate(onnx_io, test_inputs, test_ouputs) def ort_validate(self, onnx_io, inputs, outputs): - inputs, _ = torch.jit._flatten(inputs) outputs, _ = torch.jit._flatten(outputs) diff --git a/test/test_ops.py b/test/test_ops.py index 463ebb333ff..50ba72d78d0 100644 --- a/test/test_ops.py +++ b/test/test_ops.py @@ -3,7 +3,7 @@ from abc import ABC, abstractmethod from functools import lru_cache from itertools import product -from typing import Callable, List, Tuple +from typing import Callable, List import numpy as np import pytest diff --git a/test/test_transforms.py b/test/test_transforms.py index 41075c6514a..b8bb3b67cda 100644 --- a/test/test_transforms.py +++ b/test/test_transforms.py @@ -441,7 +441,6 @@ def test_resize_antialias_error(): def test_resize_antialias_default_warning(): - img = Image.new("RGB", size=(10, 10), color=127) # We make sure we don't warn for PIL images since the default behaviour doesn't change with warnings.catch_warnings(): @@ -1500,7 +1499,6 @@ def test_linear_transformation(): @pytest.mark.parametrize("dtype", int_dtypes()) def test_max_value(dtype): - assert F_t._max_value(dtype) == torch.iinfo(dtype).max # remove float testing as it can lead to errors such as # runtime error: 5.7896e+76 is outside the range of representable values of type 'float' @@ -1869,7 +1867,6 @@ def test_random_erasing(seed): def test_random_rotation(): - with pytest.raises(ValueError): transforms.RandomRotation(-0.7) @@ -2043,7 +2040,6 @@ def _to_3x3_inv(self, inv_result_matrix): return np.linalg.inv(result_matrix) def _test_transformation(self, angle, translate, scale, shear, pil_image, input_img, center=None): - a_rad = 
math.radians(angle) s_rad = [math.radians(sh_) for sh_ in shear] cnt = [20, 20] if center is None else center @@ -2167,7 +2163,6 @@ def test_transformation_range(self, angle, translate, scale, shear, pil_image, i def test_random_affine(): - with pytest.raises(ValueError): transforms.RandomAffine(-0.7) with pytest.raises(ValueError): diff --git a/test/test_transforms_tensor.py b/test/test_transforms_tensor.py index 077a12af490..fdee028ac5e 100644 --- a/test/test_transforms_tensor.py +++ b/test/test_transforms_tensor.py @@ -411,7 +411,6 @@ def test_resize_save_load(self, tmpdir): @pytest.mark.parametrize("interpolation", [NEAREST, BILINEAR, BICUBIC, NEAREST_EXACT]) @pytest.mark.parametrize("antialias", [None, True, False]) def test_resized_crop(self, scale, ratio, size, interpolation, antialias, device): - if antialias and interpolation in {NEAREST, NEAREST_EXACT}: pytest.skip(f"Can not resize if interpolation mode is {interpolation} and antialias=True") @@ -429,7 +428,6 @@ def test_resized_crop_save_load(self, tmpdir): _test_fn_save_load(fn, tmpdir) def test_antialias_default_warning(self): - img = torch.randint(0, 256, size=(3, 44, 56), dtype=torch.uint8) match = "The default value of the antialias" diff --git a/test/test_transforms_v2.py b/test/test_transforms_v2.py index 02e3e1e569a..7411eeb3c5b 100644 --- a/test/test_transforms_v2.py +++ b/test/test_transforms_v2.py @@ -1092,7 +1092,6 @@ def test__transform(self, distortion_scale, mocker): class TestElasticTransform: def test_assertions(self): - with pytest.raises(TypeError, match="alpha should be float or a sequence of floats"): transforms.ElasticTransform({}) @@ -1382,7 +1381,6 @@ def test__get_params(self, device, options, mocker): n_samples = 5 for _ in range(n_samples): - params = transform._get_params(sample) if options == [2.0]: @@ -1470,7 +1468,6 @@ def test__get_params(self, mocker): n_samples = 5 for _ in range(n_samples): - params = transform._get_params([sample]) assert "size" in params @@ -1575,7 +1572,6 @@ def test_assertions(self): ], ) def test__transform(self, inpt): - v = 121 * torch.ones(3 * 8 * 8) m = torch.ones(3 * 8 * 8, 3 * 8 * 8) transform = transforms.LinearTransformation(m, v) @@ -1773,7 +1769,6 @@ def test_antialias_warning(): @pytest.mark.parametrize("dataset_return_type", (dict, tuple)) @pytest.mark.parametrize("to_tensor", (transforms.ToTensor, transforms.ToImageTensor)) def test_classif_preset(image_type, label_type, dataset_return_type, to_tensor): - image = datapoints.Image(torch.randint(0, 256, size=(1, 3, 250, 250), dtype=torch.uint8)) if image_type is PIL.Image: image = to_pil_image(image[0]) @@ -1947,7 +1942,6 @@ def test_detection_preset(image_type, data_augmentation, to_tensor, sanitize): ) @pytest.mark.parametrize("sample_type", (tuple, dict)) def test_sanitize_bounding_boxes(min_size, labels_getter, sample_type): - if sample_type is tuple and not isinstance(labels_getter, str): # The "lambda inputs: inputs["labels"]" labels_getter used in this test # doesn't work if the input is a tuple. 
@@ -2049,7 +2043,6 @@ def test_sanitize_bounding_boxes_default_heuristic(key, sample_type): def test_sanitize_bounding_boxes_errors(): - good_bbox = datapoints.BoundingBox( [[0, 0, 10, 10]], format=datapoints.BoundingBoxFormat.XYXY, diff --git a/test/test_transforms_v2_consistency.py b/test/test_transforms_v2_consistency.py index e541feaf1eb..162d756b218 100644 --- a/test/test_transforms_v2_consistency.py +++ b/test/test_transforms_v2_consistency.py @@ -1140,7 +1140,6 @@ def make_label(extra_dims, categories): ) def test_transform(self, t_ref, t, data_kwargs): for dp in self.make_datapoints(**data_kwargs): - # We should use prototype transform first as reference transform performs inplace target update torch.manual_seed(12) output = t(dp) @@ -1206,7 +1205,6 @@ def set_seed(self, seed=12): def check(self, t, t_ref, data_kwargs=None): for dp, dp_ref in self.make_datapoints(**data_kwargs or dict()): - self.set_seed() actual = actual_image, actual_mask = t(dp) diff --git a/test/test_transforms_v2_functional.py b/test/test_transforms_v2_functional.py index 60a06f571b1..921e9ab81b4 100644 --- a/test/test_transforms_v2_functional.py +++ b/test/test_transforms_v2_functional.py @@ -905,7 +905,6 @@ def test_correctness_rotate_segmentation_mask_on_fixed_input(device): ], ) def test_correctness_crop_bounding_box(device, format, top, left, height, width, expected_bboxes): - # Expected bboxes computed using Albumentations: # import numpy as np # from albumentations.augmentations.crops.functional import crop_bbox_by_coords, normalize_bbox, denormalize_bbox diff --git a/test/test_videoapi.py b/test/test_videoapi.py index 05fbcbdbff2..e2c7995927d 100644 --- a/test/test_videoapi.py +++ b/test/test_videoapi.py @@ -275,7 +275,6 @@ def test_keyframe_reading(self, test_video, config, backend): av_keyframes = [] vr_keyframes = [] if av_reader.streams.video: - # get all keyframes using pyav. 
Then, seek randomly into video reader # and assert that all the returned values are in AV_KEYFRAMES diff --git a/test/transforms_v2_kernel_infos.py b/test/transforms_v2_kernel_infos.py index 7b877fb092d..b55be5a3ad1 100644 --- a/test/transforms_v2_kernel_infos.py +++ b/test/transforms_v2_kernel_infos.py @@ -830,7 +830,6 @@ def reference_inputs_rotate_bounding_box(): def reference_rotate_bounding_box(bounding_box, *, format, spatial_size, angle, expand=False, center=None): - if center is None: center = [spatial_size[1] * 0.5, spatial_size[0] * 0.5] @@ -1161,7 +1160,6 @@ def sample_inputs_pad_video(): def reference_pad_bounding_box(bounding_box, *, format, spatial_size, padding, padding_mode): - left, right, top, bottom = _parse_pad_padding(padding) affine_matrix = np.array( diff --git a/torchvision/_internally_replaced_utils.py b/torchvision/_internally_replaced_utils.py index d9a6e261ea2..cc4f672df85 100644 --- a/torchvision/_internally_replaced_utils.py +++ b/torchvision/_internally_replaced_utils.py @@ -23,7 +23,6 @@ def _is_remote_location_available() -> bool: def _get_extension_path(lib_name): - lib_dir = os.path.dirname(__file__) if os.name == "nt": # Register the main torchvision library location on the default DLL path diff --git a/torchvision/datasets/_optical_flow.py b/torchvision/datasets/_optical_flow.py index c7663258899..979dd4852dc 100644 --- a/torchvision/datasets/_optical_flow.py +++ b/torchvision/datasets/_optical_flow.py @@ -34,7 +34,6 @@ class FlowDataset(ABC, VisionDataset): _has_builtin_flow_mask = False def __init__(self, root: str, transforms: Optional[Callable] = None) -> None: - super().__init__(root=root) self.transforms = transforms @@ -53,7 +52,6 @@ def _read_flow(self, file_name: str): pass def __getitem__(self, index: int) -> Union[T1, T2]: - img1 = self._read_img(self._image_list[index][0]) img2 = self._read_img(self._image_list[index][1]) @@ -481,7 +479,6 @@ def _read_flo(file_name: str) -> np.ndarray: def _read_16bits_png_with_flow_and_valid_mask(file_name: str) -> Tuple[np.ndarray, np.ndarray]: - flow_and_valid = _read_png_16(file_name).to(torch.float32) flow, valid_flow_mask = flow_and_valid[:2, :, :], flow_and_valid[2, :, :] flow = (flow - 2**15) / 64 # This conversion is explained somewhere on the kitti archive diff --git a/torchvision/datasets/_stereo_matching.py b/torchvision/datasets/_stereo_matching.py index b07161d277c..03c03f22b73 100644 --- a/torchvision/datasets/_stereo_matching.py +++ b/torchvision/datasets/_stereo_matching.py @@ -66,7 +66,6 @@ def _scan_pairs( paths_left_pattern: str, paths_right_pattern: Optional[str] = None, ) -> List[Tuple[str, Optional[str]]]: - left_paths = list(sorted(glob(paths_left_pattern))) right_paths: List[Union[None, str]] diff --git a/torchvision/datasets/caltech.py b/torchvision/datasets/caltech.py index 3a9635dfe09..22db8696b74 100644 --- a/torchvision/datasets/caltech.py +++ b/torchvision/datasets/caltech.py @@ -68,7 +68,7 @@ def __init__( self.index: List[int] = [] self.y = [] - for (i, c) in enumerate(self.categories): + for i, c in enumerate(self.categories): n = len(os.listdir(os.path.join(self.root, "101_ObjectCategories", c))) self.index.extend(range(1, n + 1)) self.y.extend(n * [i]) @@ -179,7 +179,7 @@ def __init__( self.categories = sorted(os.listdir(os.path.join(self.root, "256_ObjectCategories"))) self.index: List[int] = [] self.y = [] - for (i, c) in enumerate(self.categories): + for i, c in enumerate(self.categories): n = len( [ item diff --git a/torchvision/datasets/celeba.py 
b/torchvision/datasets/celeba.py index d055f92f194..b9bea980501 100644 --- a/torchvision/datasets/celeba.py +++ b/torchvision/datasets/celeba.py @@ -130,7 +130,7 @@ def _load_csv( return CSV(headers, indices, torch.tensor(data_int)) def _check_integrity(self) -> bool: - for (_, md5, filename) in self.file_list: + for _, md5, filename in self.file_list: fpath = os.path.join(self.root, self.base_folder, filename) _, ext = os.path.splitext(filename) # Allow original archive to be deleted (zip and 7z) @@ -146,7 +146,7 @@ def download(self) -> None: print("Files already downloaded and verified") return - for (file_id, md5, filename) in self.file_list: + for file_id, md5, filename in self.file_list: download_file_from_google_drive(file_id, os.path.join(self.root, self.base_folder), filename, md5) extract_archive(os.path.join(self.root, self.base_folder, "img_align_celeba.zip")) diff --git a/torchvision/datasets/cifar.py b/torchvision/datasets/cifar.py index a2c4a7dc4c2..7f01895e4b0 100644 --- a/torchvision/datasets/cifar.py +++ b/torchvision/datasets/cifar.py @@ -56,7 +56,6 @@ def __init__( target_transform: Optional[Callable] = None, download: bool = False, ) -> None: - super().__init__(root, transform=transform, target_transform=target_transform) self.train = train # training set or test set diff --git a/torchvision/datasets/cityscapes.py b/torchvision/datasets/cityscapes.py index 85544598176..2fca0f07525 100644 --- a/torchvision/datasets/cityscapes.py +++ b/torchvision/datasets/cityscapes.py @@ -137,7 +137,6 @@ def __init__( ] if not os.path.isdir(self.images_dir) or not os.path.isdir(self.targets_dir): - if split == "train_extra": image_dir_zip = os.path.join(self.root, "leftImg8bit_trainextra.zip") else: diff --git a/torchvision/datasets/eurosat.py b/torchvision/datasets/eurosat.py index bec6df5312d..42c0fff64a5 100644 --- a/torchvision/datasets/eurosat.py +++ b/torchvision/datasets/eurosat.py @@ -46,7 +46,6 @@ def _check_exists(self) -> bool: return os.path.exists(self._data_folder) def download(self) -> None: - if self._check_exists(): return diff --git a/torchvision/datasets/gtsrb.py b/torchvision/datasets/gtsrb.py index f99a688586d..e285065fae0 100644 --- a/torchvision/datasets/gtsrb.py +++ b/torchvision/datasets/gtsrb.py @@ -31,7 +31,6 @@ def __init__( target_transform: Optional[Callable] = None, download: bool = False, ) -> None: - super().__init__(root, transform=transform, target_transform=target_transform) self._split = verify_str_arg(split, "split", ("train", "test")) @@ -63,7 +62,6 @@ def __len__(self) -> int: return len(self._samples) def __getitem__(self, index: int) -> Tuple[Any, Any]: - path, target = self._samples[index] sample = PIL.Image.open(path).convert("RGB") diff --git a/torchvision/datasets/kinetics.py b/torchvision/datasets/kinetics.py index c1fe28d042e..79151cc47e1 100644 --- a/torchvision/datasets/kinetics.py +++ b/torchvision/datasets/kinetics.py @@ -110,7 +110,6 @@ def __init__( _legacy: bool = False, output_format: str = "TCHW", ) -> None: - # TODO: support test self.num_classes = verify_str_arg(num_classes, arg="num_classes", valid_values=["400", "600", "700"]) self.extensions = extensions diff --git a/torchvision/datasets/lfw.py b/torchvision/datasets/lfw.py index 7a5aa45aa4d..e05cf3c44e1 100644 --- a/torchvision/datasets/lfw.py +++ b/torchvision/datasets/lfw.py @@ -8,7 +8,6 @@ class _LFW(VisionDataset): - base_folder = "lfw-py" download_url_prefix = "http://vis-www.cs.umass.edu/lfw/" diff --git a/torchvision/datasets/sbd.py b/torchvision/datasets/sbd.py 
index 8399d025b1b..f060973e572 100644 --- a/torchvision/datasets/sbd.py +++ b/torchvision/datasets/sbd.py @@ -57,7 +57,6 @@ def __init__( download: bool = False, transforms: Optional[Callable] = None, ) -> None: - try: from scipy.io import loadmat diff --git a/torchvision/datasets/stanford_cars.py b/torchvision/datasets/stanford_cars.py index 3e9430ef214..268e8589cee 100644 --- a/torchvision/datasets/stanford_cars.py +++ b/torchvision/datasets/stanford_cars.py @@ -37,7 +37,6 @@ def __init__( target_transform: Optional[Callable] = None, download: bool = False, ) -> None: - try: import scipy.io as sio except ImportError: diff --git a/torchvision/datasets/video_utils.py b/torchvision/datasets/video_utils.py index bb1974b7a4f..ef985ac62ce 100644 --- a/torchvision/datasets/video_utils.py +++ b/torchvision/datasets/video_utils.py @@ -113,7 +113,6 @@ def __init__( _audio_channels: int = 0, output_format: str = "THWC", ) -> None: - self.video_paths = video_paths self.num_workers = num_workers diff --git a/torchvision/datasets/widerface.py b/torchvision/datasets/widerface.py index b46c7982d8b..c0820083ecc 100644 --- a/torchvision/datasets/widerface.py +++ b/torchvision/datasets/widerface.py @@ -167,7 +167,7 @@ def _check_integrity(self) -> bool: # Allow original archive to be deleted (zip). Only need the extracted images all_files = self.FILE_LIST.copy() all_files.append(self.ANNOTATIONS_FILE) - for (_, md5, filename) in all_files: + for _, md5, filename in all_files: file, ext = os.path.splitext(filename) extracted_dir = os.path.join(self.root, file) if not os.path.exists(extracted_dir): @@ -180,7 +180,7 @@ def download(self) -> None: return # download and extract image data - for (file_id, md5, filename) in self.FILE_LIST: + for file_id, md5, filename in self.FILE_LIST: download_file_from_google_drive(file_id, self.root, filename, md5) filepath = os.path.join(self.root, filename) extract_archive(filepath) diff --git a/torchvision/io/_video_opt.py b/torchvision/io/_video_opt.py index 2bd7d11929e..202240f3afb 100644 --- a/torchvision/io/_video_opt.py +++ b/torchvision/io/_video_opt.py @@ -66,7 +66,6 @@ def __init__(self) -> None: def _validate_pts(pts_range: Tuple[int, int]) -> None: - if pts_range[0] > pts_range[1] > 0: raise ValueError( f"Start pts should not be smaller than end pts, got start pts: {pts_range[0]} and end pts: {pts_range[1]}" diff --git a/torchvision/io/video_reader.py b/torchvision/io/video_reader.py index 1cdcb267d73..24cd66ac09d 100644 --- a/torchvision/io/video_reader.py +++ b/torchvision/io/video_reader.py @@ -254,7 +254,7 @@ def get_metadata(self) -> Dict[str, Any]: (dict): dictionary containing duration and frame rate for every stream """ if self.backend == "pyav": - metadata = {} # type: Dict[str, Any] + metadata: Dict[str, Any] = {} for stream in self.container.streams: if stream.type not in metadata: if stream.type == "video": diff --git a/torchvision/models/densenet.py b/torchvision/models/densenet.py index 3b42807cc96..f7ff8f4f403 100644 --- a/torchvision/models/densenet.py +++ b/torchvision/models/densenet.py @@ -159,7 +159,6 @@ def __init__( num_classes: int = 1000, memory_efficient: bool = False, ) -> None: - super().__init__() _log_api_usage_once(self) diff --git a/torchvision/models/detection/_utils.py b/torchvision/models/detection/_utils.py index a25bdc1d42c..a0cb7bbc691 100644 --- a/torchvision/models/detection/_utils.py +++ b/torchvision/models/detection/_utils.py @@ -273,7 +273,6 @@ def encode(self, reference_boxes: Tensor, proposals: Tensor) -> Tensor: 
return targets def decode(self, rel_codes: Tensor, boxes: Tensor) -> Tensor: - """ From a set of original boxes and encoded relative box offsets, get the decoded boxes. diff --git a/torchvision/models/detection/backbone_utils.py b/torchvision/models/detection/backbone_utils.py index 87ae8627fbe..82dfd3a7e4c 100644 --- a/torchvision/models/detection/backbone_utils.py +++ b/torchvision/models/detection/backbone_utils.py @@ -120,7 +120,6 @@ def _resnet_fpn_extractor( extra_blocks: Optional[ExtraFPNBlock] = None, norm_layer: Optional[Callable[..., nn.Module]] = None, ) -> BackboneWithFPN: - # select layers that won't be frozen if trainable_layers < 0 or trainable_layers > 5: raise ValueError(f"Trainable layers should be in the range [0,5], got {trainable_layers}") diff --git a/torchvision/models/detection/faster_rcnn.py b/torchvision/models/detection/faster_rcnn.py index de32f3453bd..2b5a740428c 100644 --- a/torchvision/models/detection/faster_rcnn.py +++ b/torchvision/models/detection/faster_rcnn.py @@ -197,7 +197,6 @@ def __init__( bbox_reg_weights=None, **kwargs, ): - if not hasattr(backbone, "out_channels"): raise ValueError( "backbone should contain an attribute out_channels " diff --git a/torchvision/models/detection/fcos.py b/torchvision/models/detection/fcos.py index dd846aea9ad..631849c4d49 100644 --- a/torchvision/models/detection/fcos.py +++ b/torchvision/models/detection/fcos.py @@ -56,7 +56,6 @@ def compute_loss( anchors: List[Tensor], matched_idxs: List[Tensor], ) -> Dict[str, Tensor]: - cls_logits = head_outputs["cls_logits"] # [N, HWA, C] bbox_regression = head_outputs["bbox_regression"] # [N, HWA, 4] bbox_ctrness = head_outputs["bbox_ctrness"] # [N, HWA, 1] @@ -568,7 +567,6 @@ def forward( like `scores`, `labels` and `mask` (for Mask R-CNN models). 
""" if self.training: - if targets is None: torch._assert(False, "targets should not be none when in training mode") else: diff --git a/torchvision/models/detection/generalized_rcnn.py b/torchvision/models/detection/generalized_rcnn.py index b481265077f..46b9da1a827 100644 --- a/torchvision/models/detection/generalized_rcnn.py +++ b/torchvision/models/detection/generalized_rcnn.py @@ -36,15 +36,17 @@ def __init__(self, backbone: nn.Module, rpn: nn.Module, roi_heads: nn.Module, tr self._has_warned = False @torch.jit.unused - def eager_outputs(self, losses, detections): - # type: (Dict[str, Tensor], List[Dict[str, Tensor]]) -> Union[Dict[str, Tensor], List[Dict[str, Tensor]]] + def eager_outputs( + self, losses: Dict[str, Tensor], detections: List[Dict[str, Tensor]] + ) -> Union[Dict[str, Tensor], List[Dict[str, Tensor]]]: if self.training: return losses return detections - def forward(self, images, targets=None): - # type: (List[Tensor], Optional[List[Dict[str, Tensor]]]) -> Tuple[Dict[str, Tensor], List[Dict[str, Tensor]]] + def forward( + self, images: List[Tensor], targets: Optional[List[Dict[str, Tensor]]] = None + ) -> Tuple[Dict[str, Tensor], List[Dict[str, Tensor]]]: """ Args: images (list[Tensor]): images to be processed diff --git a/torchvision/models/detection/keypoint_rcnn.py b/torchvision/models/detection/keypoint_rcnn.py index 1ef0c1950d1..396d18d9cdb 100644 --- a/torchvision/models/detection/keypoint_rcnn.py +++ b/torchvision/models/detection/keypoint_rcnn.py @@ -200,7 +200,6 @@ def __init__( num_keypoints=None, **kwargs, ): - if not isinstance(keypoint_roi_pool, (MultiScaleRoIAlign, type(None))): raise TypeError( "keypoint_roi_pool should be of type MultiScaleRoIAlign or None instead of {type(keypoint_roi_pool)}" diff --git a/torchvision/models/detection/mask_rcnn.py b/torchvision/models/detection/mask_rcnn.py index 695dd4d63ec..a37fe4eae20 100644 --- a/torchvision/models/detection/mask_rcnn.py +++ b/torchvision/models/detection/mask_rcnn.py @@ -199,7 +199,6 @@ def __init__( mask_predictor=None, **kwargs, ): - if not isinstance(mask_roi_pool, (MultiScaleRoIAlign, type(None))): raise TypeError( f"mask_roi_pool should be of type MultiScaleRoIAlign or None instead of {type(mask_roi_pool)}" diff --git a/torchvision/models/detection/retinanet.py b/torchvision/models/detection/retinanet.py index 3a9cf80d1d5..079093bfd45 100644 --- a/torchvision/models/detection/retinanet.py +++ b/torchvision/models/detection/retinanet.py @@ -72,15 +72,19 @@ def __init__(self, in_channels, num_anchors, num_classes, norm_layer: Optional[C ) self.regression_head = RetinaNetRegressionHead(in_channels, num_anchors, norm_layer=norm_layer) - def compute_loss(self, targets, head_outputs, anchors, matched_idxs): - # type: (List[Dict[str, Tensor]], Dict[str, Tensor], List[Tensor], List[Tensor]) -> Dict[str, Tensor] + def compute_loss( + self, + targets: List[Dict[str, Tensor]], + head_outputs: Dict[str, Tensor], + anchors: List[Tensor], + matched_idxs: List[Tensor], + ) -> Dict[str, Tensor]: return { "classification": self.classification_head.compute_loss(targets, head_outputs, matched_idxs), "bbox_regression": self.regression_head.compute_loss(targets, head_outputs, anchors, matched_idxs), } - def forward(self, x): - # type: (List[Tensor]) -> Dict[str, Tensor] + def forward(self, x: List[Tensor]) -> Dict[str, Tensor]: return {"cls_logits": self.classification_head(x), "bbox_regression": self.regression_head(x)} @@ -155,8 +159,9 @@ def _load_from_state_dict( error_msgs, ) - def compute_loss(self, targets, 
head_outputs, matched_idxs): - # type: (List[Dict[str, Tensor]], Dict[str, Tensor], List[Tensor]) -> Tensor + def compute_loss( + self, targets: List[Dict[str, Tensor]], head_outputs: Dict[str, Tensor], matched_idxs: List[Tensor] + ) -> Tensor: losses = [] cls_logits = head_outputs["cls_logits"] @@ -188,8 +193,7 @@ def compute_loss(self, targets, head_outputs, matched_idxs): return _sum(losses) / len(targets) - def forward(self, x): - # type: (List[Tensor]) -> Tensor + def forward(self, x: List[Tensor]) -> Tensor: all_cls_logits = [] for features in x: @@ -269,8 +273,13 @@ def _load_from_state_dict( error_msgs, ) - def compute_loss(self, targets, head_outputs, anchors, matched_idxs): - # type: (List[Dict[str, Tensor]], Dict[str, Tensor], List[Tensor], List[Tensor]) -> Tensor + def compute_loss( + self, + targets: List[Dict[str, Tensor]], + head_outputs: Dict[str, Tensor], + anchors: List[Tensor], + matched_idxs: List[Tensor], + ) -> Tensor: losses = [] bbox_regression = head_outputs["bbox_regression"] @@ -301,8 +310,7 @@ def compute_loss(self, targets, head_outputs, anchors, matched_idxs): return _sum(losses) / max(1, len(targets)) - def forward(self, x): - # type: (List[Tensor]) -> Tensor + def forward(self, x: List[Tensor]) -> Tensor: all_bbox_regression = [] for features in x: @@ -480,15 +488,17 @@ def __init__( self._has_warned = False @torch.jit.unused - def eager_outputs(self, losses, detections): - # type: (Dict[str, Tensor], List[Dict[str, Tensor]]) -> Tuple[Dict[str, Tensor], List[Dict[str, Tensor]]] + def eager_outputs( + self, losses: Dict[str, Tensor], detections: List[Dict[str, Tensor]] + ) -> Tuple[Dict[str, Tensor], List[Dict[str, Tensor]]]: if self.training: return losses return detections - def compute_loss(self, targets, head_outputs, anchors): - # type: (List[Dict[str, Tensor]], Dict[str, Tensor], List[Tensor]) -> Dict[str, Tensor] + def compute_loss( + self, targets: List[Dict[str, Tensor]], head_outputs: Dict[str, Tensor], anchors: List[Tensor] + ) -> Dict[str, Tensor]: matched_idxs = [] for anchors_per_image, targets_per_image in zip(anchors, targets): if targets_per_image["boxes"].numel() == 0: @@ -502,8 +512,9 @@ def compute_loss(self, targets, head_outputs, anchors): return self.head.compute_loss(targets, head_outputs, anchors, matched_idxs) - def postprocess_detections(self, head_outputs, anchors, image_shapes): - # type: (Dict[str, List[Tensor]], List[List[Tensor]], List[Tuple[int, int]]) -> List[Dict[str, Tensor]] + def postprocess_detections( + self, head_outputs: Dict[str, List[Tensor]], anchors: List[List[Tensor]], image_shapes: List[Tuple[int, int]] + ) -> List[Dict[str, Tensor]]: class_logits = head_outputs["cls_logits"] box_regression = head_outputs["bbox_regression"] @@ -566,8 +577,9 @@ def postprocess_detections(self, head_outputs, anchors, image_shapes): return detections - def forward(self, images, targets=None): - # type: (List[Tensor], Optional[List[Dict[str, Tensor]]]) -> Tuple[Dict[str, Tensor], List[Dict[str, Tensor]]] + def forward( + self, images: List[Tensor], targets: Optional[List[Dict[str, Tensor]]] = None + ) -> Tuple[Dict[str, Tensor], List[Dict[str, Tensor]]]: """ Args: images (list[Tensor]): images to be processed diff --git a/torchvision/models/detection/roi_heads.py b/torchvision/models/detection/roi_heads.py index 51b210cb6f3..c44c769edb9 100644 --- a/torchvision/models/detection/roi_heads.py +++ b/torchvision/models/detection/roi_heads.py @@ -9,8 +9,9 @@ from . 
import _utils as det_utils -def fastrcnn_loss(class_logits, box_regression, labels, regression_targets): - # type: (Tensor, Tensor, List[Tensor], List[Tensor]) -> Tuple[Tensor, Tensor] +def fastrcnn_loss( + class_logits: Tensor, box_regression: Tensor, labels: List[Tensor], regression_targets: List[Tensor] +) -> Tuple[Tensor, Tensor]: """ Computes the loss for Faster R-CNN. @@ -49,8 +50,7 @@ def fastrcnn_loss(class_logits, box_regression, labels, regression_targets): return classification_loss, box_loss -def maskrcnn_inference(x, labels): - # type: (Tensor, List[Tensor]) -> List[Tensor] +def maskrcnn_inference(x: Tensor, labels: List[Tensor]) -> List[Tensor]: """ From the results of the CNN, post process the masks by taking the mask corresponding to the class with max @@ -79,8 +79,7 @@ def maskrcnn_inference(x, labels): return mask_prob -def project_masks_on_boxes(gt_masks, boxes, matched_idxs, M): - # type: (Tensor, Tensor, Tensor, int) -> Tensor +def project_masks_on_boxes(gt_masks: Tensor, boxes: Tensor, matched_idxs: Tensor, M: int) -> Tensor: """ Given segmentation masks and the bounding boxes corresponding to the location of the masks in the image, this function @@ -94,8 +93,13 @@ def project_masks_on_boxes(gt_masks, boxes, matched_idxs, M): return roi_align(gt_masks, rois, (M, M), 1.0)[:, 0] -def maskrcnn_loss(mask_logits, proposals, gt_masks, gt_labels, mask_matched_idxs): - # type: (Tensor, List[Tensor], List[Tensor], List[Tensor], List[Tensor]) -> Tensor +def maskrcnn_loss( + mask_logits: Tensor, + proposals: List[Tensor], + gt_masks: List[Tensor], + gt_labels: List[Tensor], + mask_matched_idxs: List[Tensor], +) -> Tensor: """ Args: proposals (list[BoxList]) @@ -126,8 +130,7 @@ def maskrcnn_loss(mask_logits, proposals, gt_masks, gt_labels, mask_matched_idxs return mask_loss -def keypoints_to_heatmap(keypoints, rois, heatmap_size): - # type: (Tensor, Tensor, int) -> Tuple[Tensor, Tensor] +def keypoints_to_heatmap(keypoints: Tensor, rois: Tensor, heatmap_size: int) -> Tuple[Tensor, Tensor]: offset_x = rois[:, 0] offset_y = rois[:, 1] scale_x = heatmap_size / (rois[:, 2] - rois[:, 0]) @@ -295,8 +298,9 @@ def heatmaps_to_keypoints(maps, rois): return xy_preds.permute(0, 2, 1), end_scores -def keypointrcnn_loss(keypoint_logits, proposals, gt_keypoints, keypoint_matched_idxs): - # type: (Tensor, List[Tensor], List[Tensor], List[Tensor]) -> Tensor +def keypointrcnn_loss( + keypoint_logits: Tensor, proposals: List[Tensor], gt_keypoints: List[Tensor], keypoint_matched_idxs: List[Tensor] +) -> Tensor: N, K, H, W = keypoint_logits.shape if H != W: raise ValueError( @@ -326,8 +330,7 @@ def keypointrcnn_loss(keypoint_logits, proposals, gt_keypoints, keypoint_matched return keypoint_loss -def keypointrcnn_inference(x, boxes): - # type: (Tensor, List[Tensor]) -> Tuple[List[Tensor], List[Tensor]] +def keypointrcnn_inference(x: Tensor, boxes: List[Tensor]) -> Tuple[List[Tensor], List[Tensor]]: kp_probs = [] kp_scores = [] @@ -342,8 +345,7 @@ def keypointrcnn_inference(x, boxes): return kp_probs, kp_scores -def _onnx_expand_boxes(boxes, scale): - # type: (Tensor, float) -> Tensor +def _onnx_expand_boxes(boxes: Tensor, scale: float) -> Tensor: w_half = (boxes[:, 2] - boxes[:, 0]) * 0.5 h_half = (boxes[:, 3] - boxes[:, 1]) * 0.5 x_c = (boxes[:, 2] + boxes[:, 0]) * 0.5 @@ -363,8 +365,7 @@ def _onnx_expand_boxes(boxes, scale): # the next two functions should be merged inside Masker # but are kept here for the moment while we need them # temporarily for paste_mask_in_image -def expand_boxes(boxes, 
scale): - # type: (Tensor, float) -> Tensor +def expand_boxes(boxes: Tensor, scale: float) -> Tensor: if torchvision._is_tracing(): return _onnx_expand_boxes(boxes, scale) w_half = (boxes[:, 2] - boxes[:, 0]) * 0.5 @@ -384,13 +385,11 @@ def expand_boxes(boxes, scale): @torch.jit.unused -def expand_masks_tracing_scale(M, padding): - # type: (int, int) -> float +def expand_masks_tracing_scale(M: int, padding: int) -> float: return torch.tensor(M + 2 * padding).to(torch.float32) / torch.tensor(M).to(torch.float32) -def expand_masks(mask, padding): - # type: (Tensor, int) -> Tuple[Tensor, float] +def expand_masks(mask: Tensor, padding: int) -> Tuple[Tensor, float]: M = mask.shape[-1] if torch._C._get_tracing_state(): # could not import is_tracing(), not sure why scale = expand_masks_tracing_scale(M, padding) @@ -400,8 +399,7 @@ def expand_masks(mask, padding): return padded_mask, scale -def paste_mask_in_image(mask, box, im_h, im_w): - # type: (Tensor, Tensor, int, int) -> Tensor +def paste_mask_in_image(mask: Tensor, box: Tensor, im_h: int, im_w: int) -> Tensor: TO_REMOVE = 1 w = int(box[2] - box[0] + TO_REMOVE) h = int(box[3] - box[1] + TO_REMOVE) @@ -471,8 +469,7 @@ def _onnx_paste_masks_in_image_loop(masks, boxes, im_h, im_w): return res_append -def paste_masks_in_image(masks, boxes, img_shape, padding=1): - # type: (Tensor, Tensor, Tuple[int, int], int) -> Tensor +def paste_masks_in_image(masks: Tensor, boxes: Tensor, img_shape: Tuple[int, int], padding: int = 1) -> Tensor: masks, scale = expand_masks(masks, padding=padding) boxes = expand_boxes(boxes, scale).to(dtype=torch.int64) im_h, im_w = img_shape @@ -565,12 +562,12 @@ def has_keypoint(self): return False return True - def assign_targets_to_proposals(self, proposals, gt_boxes, gt_labels): - # type: (List[Tensor], List[Tensor], List[Tensor]) -> Tuple[List[Tensor], List[Tensor]] + def assign_targets_to_proposals( + self, proposals: List[Tensor], gt_boxes: List[Tensor], gt_labels: List[Tensor] + ) -> Tuple[List[Tensor], List[Tensor]]: matched_idxs = [] labels = [] for proposals_in_image, gt_boxes_in_image, gt_labels_in_image in zip(proposals, gt_boxes, gt_labels): - if gt_boxes_in_image.numel() == 0: # Background image device = proposals_in_image.device @@ -600,8 +597,7 @@ def assign_targets_to_proposals(self, proposals, gt_boxes, gt_labels): labels.append(labels_in_image) return matched_idxs, labels - def subsample(self, labels): - # type: (List[Tensor]) -> List[Tensor] + def subsample(self, labels: List[Tensor]) -> List[Tensor]: sampled_pos_inds, sampled_neg_inds = self.fg_bg_sampler(labels) sampled_inds = [] for img_idx, (pos_inds_img, neg_inds_img) in enumerate(zip(sampled_pos_inds, sampled_neg_inds)): @@ -609,14 +605,12 @@ def subsample(self, labels): sampled_inds.append(img_sampled_inds) return sampled_inds - def add_gt_proposals(self, proposals, gt_boxes): - # type: (List[Tensor], List[Tensor]) -> List[Tensor] + def add_gt_proposals(self, proposals: List[Tensor], gt_boxes: List[Tensor]) -> List[Tensor]: proposals = [torch.cat((proposal, gt_box)) for proposal, gt_box in zip(proposals, gt_boxes)] return proposals - def check_targets(self, targets): - # type: (Optional[List[Dict[str, Tensor]]]) -> None + def check_targets(self, targets: Optional[List[Dict[str, Tensor]]]) -> None: if targets is None: raise ValueError("targets should not be None") if not all(["boxes" in t for t in targets]): @@ -629,10 +623,9 @@ def check_targets(self, targets): def select_training_samples( self, - proposals, # type: List[Tensor] - targets, # type: 
Optional[List[Dict[str, Tensor]]]
-    ):
-        # type: (...) -> Tuple[List[Tensor], List[Tensor], List[Tensor], List[Tensor]]
+        proposals: List[Tensor],
+        targets: Optional[List[Dict[str, Tensor]]],
+    ) -> Tuple[List[Tensor], List[Tensor], List[Tensor], List[Tensor]]:
         self.check_targets(targets)
         if targets is None:
             raise ValueError("targets should not be None")
@@ -667,12 +660,11 @@ def select_training_samples(

     def postprocess_detections(
         self,
-        class_logits,  # type: Tensor
-        box_regression,  # type: Tensor
-        proposals,  # type: List[Tensor]
-        image_shapes,  # type: List[Tuple[int, int]]
-    ):
-        # type: (...) -> Tuple[List[Tensor], List[Tensor], List[Tensor]]
+        class_logits: Tensor,
+        box_regression: Tensor,
+        proposals: List[Tensor],
+        image_shapes: List[Tuple[int, int]],
+    ) -> Tuple[List[Tensor], List[Tensor], List[Tensor]]:
         device = class_logits.device
         num_classes = class_logits.shape[-1]

@@ -726,12 +718,11 @@ def postprocess_detections(

     def forward(
         self,
-        features,  # type: Dict[str, Tensor]
-        proposals,  # type: List[Tensor]
-        image_shapes,  # type: List[Tuple[int, int]]
-        targets=None,  # type: Optional[List[Dict[str, Tensor]]]
-    ):
-        # type: (...) -> Tuple[List[Dict[str, Tensor]], Dict[str, Tensor]]
+        features: Dict[str, Tensor],
+        proposals: List[Tensor],
+        image_shapes: List[Tuple[int, int]],
+        targets: Optional[List[Dict[str, Tensor]]] = None,
+    ) -> Tuple[List[Dict[str, Tensor]], Dict[str, Tensor]]:
         """
         Args:
             features (List[Tensor])
diff --git a/torchvision/models/detection/rpn.py b/torchvision/models/detection/rpn.py
index 07a8b931150..5e0c68d47de 100644
--- a/torchvision/models/detection/rpn.py
+++ b/torchvision/models/detection/rpn.py
@@ -192,7 +192,6 @@ def post_nms_top_n(self) -> int:
     def assign_targets_to_anchors(
         self, anchors: List[Tensor], targets: List[Dict[str, Tensor]]
     ) -> Tuple[List[Tensor], List[Tensor]]:
-
         labels = []
         matched_gt_boxes = []
         for anchors_per_image, targets_per_image in zip(anchors, targets):
@@ -245,7 +244,6 @@ def filter_proposals(
         image_shapes: List[Tuple[int, int]],
         num_anchors_per_level: List[int],
     ) -> Tuple[List[Tensor], List[Tensor]]:
-
         num_images = proposals.shape[0]
         device = proposals.device
         # do not backprop through objectness
@@ -338,7 +336,6 @@ def forward(
         features: Dict[str, Tensor],
         targets: Optional[List[Dict[str, Tensor]]] = None,
     ) -> Tuple[List[Tensor], Dict[str, Tensor]]:
-
         """
         Args:
             images (ImageList): images for which we want to compute the predictions
diff --git a/torchvision/models/feature_extraction.py b/torchvision/models/feature_extraction.py
index d8c2dca4afe..0503d919073 100644
--- a/torchvision/models/feature_extraction.py
+++ b/torchvision/models/feature_extraction.py
@@ -443,7 +443,6 @@ def create_feature_extractor(
     is_training = model.training

     if all(arg is None for arg in [return_nodes, train_return_nodes, eval_return_nodes]):
-
         raise ValueError(
             "Either `return_nodes` or `train_return_nodes` and `eval_return_nodes` together, should be specified"
         )
diff --git a/torchvision/models/maxvit.py b/torchvision/models/maxvit.py
index d8e62cd36a2..8dfd70c461e 100644
--- a/torchvision/models/maxvit.py
+++ b/torchvision/models/maxvit.py
@@ -743,7 +743,6 @@ def _maxvit(
     # kwargs,
     **kwargs: Any,
 ) -> MaxVit:
-
     if weights is not None:
         _ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))
         assert weights.meta["min_size"][0] == weights.meta["min_size"][1]
diff --git a/torchvision/models/optical_flow/raft.py b/torchvision/models/optical_flow/raft.py
index c294777ee6f..84edd50746d 100644
--- a/torchvision/models/optical_flow/raft.py
+++ b/torchvision/models/optical_flow/raft.py
@@ -482,7 +482,6 @@ def __init__(self, *, feature_encoder, context_encoder, corr_block, update_block
             raise ValueError("The update_block parameter should expose a 'hidden_state_size' attribute.")

     def forward(self, image1, image2, num_flow_updates: int = 12):
-
         batch_size, _, h, w = image1.shape
         if (h, w) != image2.shape[-2:]:
             raise ValueError(f"input images should have the same shape, instead got ({h}, {w}) != {image2.shape[-2:]}")
diff --git a/torchvision/models/video/resnet.py b/torchvision/models/video/resnet.py
index a1cb2884013..0fe61d6b211 100644
--- a/torchvision/models/video/resnet.py
+++ b/torchvision/models/video/resnet.py
@@ -26,7 +26,6 @@ class Conv3DSimple(nn.Conv3d):
     def __init__(
         self, in_planes: int, out_planes: int, midplanes: Optional[int] = None, stride: int = 1, padding: int = 1
     ) -> None:
-
         super().__init__(
             in_channels=in_planes,
             out_channels=out_planes,
@@ -68,7 +67,6 @@ class Conv3DNoTemporal(nn.Conv3d):
     def __init__(
         self, in_planes: int, out_planes: int, midplanes: Optional[int] = None, stride: int = 1, padding: int = 1
     ) -> None:
-
         super().__init__(
             in_channels=in_planes,
             out_channels=out_planes,
@@ -84,7 +82,6 @@ def get_downsample_stride(stride: int) -> Tuple[int, int, int]:


 class BasicBlock(nn.Module):
-
     expansion = 1

     def __init__(
@@ -131,7 +128,6 @@ def __init__(
         stride: int = 1,
         downsample: Optional[nn.Module] = None,
     ) -> None:
-
         super().__init__()

         midplanes = (inplanes * planes * 3 * 3 * 3) // (inplanes * 3 * 3 + 3 * planes)
diff --git a/torchvision/ops/_utils.py b/torchvision/ops/_utils.py
index a6ca557a98b..1404dafad25 100644
--- a/torchvision/ops/_utils.py
+++ b/torchvision/ops/_utils.py
@@ -88,7 +88,6 @@ def _loss_inter_union(
     boxes1: torch.Tensor,
     boxes2: torch.Tensor,
 ) -> Tuple[torch.Tensor, torch.Tensor]:
-
     x1, y1, x2, y2 = boxes1.unbind(dim=-1)
     x1g, y1g, x2g, y2g = boxes2.unbind(dim=-1)
diff --git a/torchvision/ops/boxes.py b/torchvision/ops/boxes.py
index a541f8d880a..7f5c9885195 100644
--- a/torchvision/ops/boxes.py
+++ b/torchvision/ops/boxes.py
@@ -363,7 +363,6 @@ def distance_box_iou(boxes1: Tensor, boxes2: Tensor, eps: float = 1e-7) -> Tenso


 def _box_diou_iou(boxes1: Tensor, boxes2: Tensor, eps: float = 1e-7) -> Tuple[Tensor, Tensor]:
-
     iou = box_iou(boxes1, boxes2)
     lti = torch.min(boxes1[:, None, :2], boxes2[:, :2])
     rbi = torch.max(boxes1[:, None, 2:], boxes2[:, 2:])
diff --git a/torchvision/ops/ciou_loss.py b/torchvision/ops/ciou_loss.py
index 75a1c4cb1f3..d825e79dff0 100644
--- a/torchvision/ops/ciou_loss.py
+++ b/torchvision/ops/ciou_loss.py
@@ -11,7 +11,6 @@ def complete_box_iou_loss(
     reduction: str = "none",
     eps: float = 1e-7,
 ) -> torch.Tensor:
-
     """
     Gradient-friendly IoU loss with an additional penalty that is non-zero when the
     boxes do not overlap. This loss function considers important geometrical
diff --git a/torchvision/ops/diou_loss.py b/torchvision/ops/diou_loss.py
index c64c6673a88..b08da18f32b 100644
--- a/torchvision/ops/diou_loss.py
+++ b/torchvision/ops/diou_loss.py
@@ -12,7 +12,6 @@ def distance_box_iou_loss(
     reduction: str = "none",
     eps: float = 1e-7,
 ) -> torch.Tensor:
-
     """
     Gradient-friendly IoU loss with an additional penalty that is non-zero when the
     distance between boxes' centers isn't zero. Indeed, for two exactly overlapping
@@ -69,7 +68,6 @@ def _diou_iou_loss(
     boxes2: torch.Tensor,
     eps: float = 1e-7,
 ) -> Tuple[torch.Tensor, torch.Tensor]:
-
     intsct, union = _loss_inter_union(boxes1, boxes2)
     iou = intsct / (union + eps)
     # smallest enclosing box
diff --git a/torchvision/ops/giou_loss.py b/torchvision/ops/giou_loss.py
index ec8bc8852fe..e56dcc16c7d 100644
--- a/torchvision/ops/giou_loss.py
+++ b/torchvision/ops/giou_loss.py
@@ -10,7 +10,6 @@ def generalized_box_iou_loss(
     reduction: str = "none",
     eps: float = 1e-7,
 ) -> torch.Tensor:
-
     """
     Gradient-friendly IoU loss with an additional penalty that is non-zero when the
     boxes do not overlap and scales with the size of their smallest enclosing box.
diff --git a/torchvision/ops/misc.py b/torchvision/ops/misc.py
index 0bbea6bce43..73299d21c8f 100644
--- a/torchvision/ops/misc.py
+++ b/torchvision/ops/misc.py
@@ -81,7 +81,6 @@ def __init__(
         bias: Optional[bool] = None,
         conv_layer: Callable[..., torch.nn.Module] = torch.nn.Conv2d,
     ) -> None:
-
         if padding is None:
             if isinstance(kernel_size, int) and isinstance(dilation, int):
                 padding = (kernel_size - 1) // 2 * dilation
@@ -155,7 +154,6 @@ def __init__(
         inplace: Optional[bool] = True,
         bias: Optional[bool] = None,
     ) -> None:
-
         super().__init__(
             in_channels,
             out_channels,
@@ -204,7 +202,6 @@ def __init__(
         inplace: Optional[bool] = True,
         bias: Optional[bool] = None,
     ) -> None:
-
         super().__init__(
             in_channels,
             out_channels,
diff --git a/torchvision/prototype/datasets/_builtin/pcam.py b/torchvision/prototype/datasets/_builtin/pcam.py
index 4de5ae2765b..7736686f328 100644
--- a/torchvision/prototype/datasets/_builtin/pcam.py
+++ b/torchvision/prototype/datasets/_builtin/pcam.py
@@ -114,7 +114,6 @@ def _prepare_sample(self, data: Tuple[Any, Any]) -> Dict[str, Any]:
         }

     def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
-
         images_dp, targets_dp = resource_dps

         images_dp = PCAMH5Reader(images_dp, key="x")
diff --git a/torchvision/prototype/datasets/_builtin/semeion.py b/torchvision/prototype/datasets/_builtin/semeion.py
index 92e1b93b410..8db5ab1a232 100644
--- a/torchvision/prototype/datasets/_builtin/semeion.py
+++ b/torchvision/prototype/datasets/_builtin/semeion.py
@@ -25,7 +25,6 @@ class SEMEION(Dataset):
     """

     def __init__(self, root: Union[str, pathlib.Path], *, skip_integrity_check: bool = False) -> None:
-
         self._categories = _info()["categories"]

         super().__init__(root, skip_integrity_check=skip_integrity_check)
diff --git a/torchvision/prototype/datasets/_builtin/stanford_cars.py b/torchvision/prototype/datasets/_builtin/stanford_cars.py
index a76b2dba270..becdacd9e9e 100644
--- a/torchvision/prototype/datasets/_builtin/stanford_cars.py
+++ b/torchvision/prototype/datasets/_builtin/stanford_cars.py
@@ -94,7 +94,6 @@ def _prepare_sample(self, data: Tuple[Tuple[str, BinaryIO], Tuple[int, int, int,
         )

     def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
-
         images_dp, targets_dp = resource_dps
         if self._split == "train":
             targets_dp = Filter(targets_dp, path_comparator("name", "cars_train_annos.mat"))
diff --git a/torchvision/prototype/datasets/utils/_resource.py b/torchvision/prototype/datasets/utils/_resource.py
index af4ede38dc0..9d05044dff3 100644
--- a/torchvision/prototype/datasets/utils/_resource.py
+++ b/torchvision/prototype/datasets/utils/_resource.py
@@ -186,7 +186,6 @@ def _download(self, root: pathlib.Path) -> None:
             return self.resolve()._download(root)

         for url in itertools.chain((self.url,), self.mirrors):
-
             try:
                 download_url(url, str(root), filename=self.file_name, md5=None)
             # TODO: make this more precise
diff --git a/torchvision/prototype/models/depth/stereo/crestereo.py b/torchvision/prototype/models/depth/stereo/crestereo.py
index f1b9a7c8774..5a9fbdc20e8 100644
--- a/torchvision/prototype/models/depth/stereo/crestereo.py
+++ b/torchvision/prototype/models/depth/stereo/crestereo.py
@@ -30,7 +30,6 @@ def __init__(
         upsample_factor: int,
         multiplier: float = 0.25,
     ) -> None:
-
         super().__init__()
         self.mask_head = nn.Sequential(
             Conv2dNormActivation(in_channels, hidden_size, norm_layer=None, kernel_size=3),
@@ -91,7 +90,6 @@ def _check_window_specs(
     search_window_2d: Tuple[int, int] = (3, 3),
     search_dilate_2d: Tuple[int, int] = (1, 1),
 ) -> None:
-
     if not np.prod(search_window_1d) == np.prod(search_window_2d):
         raise ValueError(
             f"The 1D and 2D windows should contain the same number of elements. "
@@ -127,7 +125,6 @@ def __init__(
         search_window_2d: Tuple[int, int] = (3, 3),
         search_dilate_2d: Tuple[int, int] = (1, 1),
     ) -> None:
-
         super().__init__()
         _check_window_specs(
             search_window_1d=search_window_1d,
@@ -986,7 +983,6 @@ def _crestereo(
     cross_attention_module: Callable[..., nn.Module],
     **kwargs,
 ) -> CREStereo:
-
     feature_encoder = kwargs.pop("feature_encoder", None) or raft.FeatureEncoder(
         block=feature_encoder_block,
         layers=feature_encoder_layers,
diff --git a/torchvision/prototype/transforms/_augment.py b/torchvision/prototype/transforms/_augment.py
index d04baf739d1..f2b1cc1f80c 100644
--- a/torchvision/prototype/transforms/_augment.py
+++ b/torchvision/prototype/transforms/_augment.py
@@ -128,7 +128,6 @@ def _copy_paste(
         resize_interpolation: F.InterpolationMode,
         antialias: Optional[bool],
     ) -> Tuple[datapoints._TensorImageType, Dict[str, Any]]:
-
         paste_masks = paste_target["masks"].wrap_like(paste_target["masks"], paste_target["masks"][random_selection])
         paste_boxes = paste_target["boxes"].wrap_like(paste_target["boxes"], paste_target["boxes"][random_selection])
         paste_labels = paste_target["labels"].wrap_like(
@@ -269,7 +268,6 @@ def forward(self, *inputs: Any) -> Any:
         output_images, output_targets = [], []

         for image, target, paste_image, paste_target in zip(images, targets, images_rolled, targets_rolled):
-
             # Random paste targets selection:
             num_masks = len(paste_target["masks"])

diff --git a/torchvision/transforms/_functional_pil.py b/torchvision/transforms/_functional_pil.py
index 120998d0072..c9da0669c0b 100644
--- a/torchvision/transforms/_functional_pil.py
+++ b/torchvision/transforms/_functional_pil.py
@@ -124,7 +124,6 @@ def adjust_gamma(
     gamma: float,
     gain: float = 1.0,
 ) -> Image.Image:
-
     if not _is_pil_image(img):
         raise TypeError(f"img should be PIL Image. Got {type(img)}")

@@ -147,7 +146,6 @@ def pad(
     fill: Optional[Union[float, List[float], Tuple[float, ...]]] = 0,
     padding_mode: Literal["constant", "edge", "reflect", "symmetric"] = "constant",
 ) -> Image.Image:
-
     if not _is_pil_image(img):
         raise TypeError(f"img should be PIL Image. Got {type(img)}")

@@ -228,7 +226,6 @@ def crop(
     height: int,
     width: int,
 ) -> Image.Image:
-
     if not _is_pil_image(img):
         raise TypeError(f"img should be PIL Image. Got {type(img)}")

@@ -241,7 +238,6 @@ def resize(
     size: Union[List[int], int],
     interpolation: int = Image.BILINEAR,
 ) -> Image.Image:
-
     if not _is_pil_image(img):
         raise TypeError(f"img should be PIL Image. Got {type(img)}")
     if not (isinstance(size, list) and len(size) == 2):
@@ -256,7 +252,6 @@ def _parse_fill(
     img: Image.Image,
     name: str = "fillcolor",
 ) -> Dict[str, Optional[Union[float, List[float], Tuple[float, ...]]]]:
-
     # Process fill color for affine transforms
     num_channels = get_image_num_channels(img)
     if fill is None:
@@ -286,7 +281,6 @@ def affine(
     interpolation: int = Image.NEAREST,
     fill: Optional[Union[int, float, Sequence[int], Sequence[float]]] = None,
 ) -> Image.Image:
-
     if not _is_pil_image(img):
         raise TypeError(f"img should be PIL Image. Got {type(img)}")

@@ -304,7 +298,6 @@ def rotate(
     center: Optional[Tuple[int, int]] = None,
     fill: Optional[Union[int, float, Sequence[int], Sequence[float]]] = None,
 ) -> Image.Image:
-
     if not _is_pil_image(img):
         raise TypeError(f"img should be PIL Image. Got {type(img)}")

@@ -319,7 +312,6 @@ def perspective(
     interpolation: int = Image.BICUBIC,
     fill: Optional[Union[int, float, Sequence[int], Sequence[float]]] = None,
 ) -> Image.Image:
-
     if not _is_pil_image(img):
         raise TypeError(f"img should be PIL Image. Got {type(img)}")

diff --git a/torchvision/transforms/_functional_tensor.py b/torchvision/transforms/_functional_tensor.py
index d0e7c17882b..8645fff4be0 100644
--- a/torchvision/transforms/_functional_tensor.py
+++ b/torchvision/transforms/_functional_tensor.py
@@ -66,7 +66,6 @@ def convert_image_dtype(image: torch.Tensor, dtype: torch.dtype = torch.float) -
         return image

     if image.is_floating_point():
-
         # TODO: replace with dtype.is_floating_point when torchscript supports it
         if torch.tensor(0, dtype=dtype).is_floating_point():
             return image.to(dtype)
@@ -482,7 +481,6 @@ def _assert_grid_transform_inputs(
     supported_interpolation_modes: List[str],
     coeffs: Optional[List[float]] = None,
 ) -> None:
-
     if not (isinstance(img, torch.Tensor)):
         raise TypeError("Input img should be Tensor")

@@ -545,7 +543,6 @@ def _cast_squeeze_out(img: Tensor, need_cast: bool, need_squeeze: bool, out_dtyp
 def _apply_grid_transform(
     img: Tensor, grid: Tensor, mode: str, fill: Optional[Union[int, float, List[float]]]
 ) -> Tensor:
-
     img, need_cast, need_squeeze, out_dtype = _cast_squeeze_in(img, [grid.dtype])

     if img.shape[0] > 1:
@@ -619,7 +616,6 @@ def affine(


 def _compute_affine_output_size(matrix: List[float], w: int, h: int) -> Tuple[int, int]:
-
     # Inspired of PIL implementation:
     # https://github.com/python-pillow/Pillow/blob/11de3318867e4398057373ee9f12dcb33db7335c/src/PIL/Image.py#L2054

@@ -765,7 +761,6 @@ def gaussian_blur(img: Tensor, kernel_size: List[int], sigma: List[float]) -> Te


 def invert(img: Tensor) -> Tensor:
-
     _assert_image_tensor(img)

     if img.ndim < 3:
@@ -777,7 +772,6 @@ def invert(img: Tensor) -> Tensor:


 def posterize(img: Tensor, bits: int) -> Tensor:
-
     _assert_image_tensor(img)

     if img.ndim < 3:
@@ -791,7 +785,6 @@ def posterize(img: Tensor, bits: int) -> Tensor:


 def solarize(img: Tensor, threshold: float) -> Tensor:
-
     _assert_image_tensor(img)

     if img.ndim < 3:
@@ -839,7 +832,6 @@ def adjust_sharpness(img: Tensor, sharpness_factor: float) -> Tensor:


 def autocontrast(img: Tensor) -> Tensor:
-
     _assert_image_tensor(img)

     if img.ndim < 3:
@@ -886,7 +878,6 @@ def _equalize_single_image(img: Tensor) -> Tensor:


 def equalize(img: Tensor) -> Tensor:
-
     _assert_image_tensor(img)

     if not (3 <= img.ndim <= 4):
@@ -950,7 +941,6 @@ def elastic_transform(
     interpolation: str = "bilinear",
     fill: Optional[Union[int, float, List[float]]] = None,
 ) -> Tensor:
-
     if not (isinstance(img, torch.Tensor)):
         raise TypeError(f"img should be Tensor. Got {type(img)}")

diff --git a/torchvision/transforms/transforms.py b/torchvision/transforms/transforms.py
index d0290f93249..345f653c2e1 100644
--- a/torchvision/transforms/transforms.py
+++ b/torchvision/transforms/transforms.py
@@ -1724,7 +1724,6 @@ def forward(self, img):
             img (Tensor): Erased Tensor image.
         """
         if torch.rand(1) < self.p:
-
             # cast self.value to script acceptable type
             if isinstance(self.value, (int, float)):
                 value = [float(self.value)]
diff --git a/torchvision/transforms/v2/_geometry.py b/torchvision/transforms/v2/_geometry.py
index 731d768c2a6..79e355a996a 100644
--- a/torchvision/transforms/v2/_geometry.py
+++ b/torchvision/transforms/v2/_geometry.py
@@ -1214,7 +1214,6 @@ def _get_params(self, flat_inputs: List[Any]) -> Dict[str, Any]:
         return dict(top=top, left=left, height=new_h, width=new_w, is_within_crop_area=is_within_crop_area)

     def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
-
         if len(params) < 1:
             return inpt

diff --git a/torchvision/transforms/v2/_transform.py b/torchvision/transforms/v2/_transform.py
index f83ed5d6e11..10cb074f3e5 100644
--- a/torchvision/transforms/v2/_transform.py
+++ b/torchvision/transforms/v2/_transform.py
@@ -13,7 +13,6 @@


 class Transform(nn.Module):
-
     # Class attribute defining transformed types. Other types are passed-through without any transformation
     # We support both Types and callables that are able to do further checks on the type of the input.
     _transformed_types: Tuple[Union[Type, Callable[[Any], bool]], ...] = (torch.Tensor, PIL.Image.Image)
diff --git a/torchvision/transforms/v2/functional/_geometry.py b/torchvision/transforms/v2/functional/_geometry.py
index aab3be24e0b..e3bdc5f396d 100644
--- a/torchvision/transforms/v2/functional/_geometry.py
+++ b/torchvision/transforms/v2/functional/_geometry.py
@@ -476,7 +476,6 @@ def _compute_affine_output_size(matrix: List[float], w: int, h: int) -> Tuple[in
 def _apply_grid_transform(
     img: torch.Tensor, grid: torch.Tensor, mode: str, fill: datapoints._FillTypeJIT
 ) -> torch.Tensor:
-
     # We are using context knowledge that grid should have float dtype
     fp = img.dtype == grid.dtype
     float_img = img if fp else img.to(grid.dtype)
@@ -1253,7 +1252,6 @@ def crop_bounding_box(
     height: int,
     width: int,
 ) -> Tuple[torch.Tensor, Tuple[int, int]]:
-
     # Crop or implicit pad if left and/or top have negative values:
     if format == datapoints.BoundingBoxFormat.XYXY:
         sub = [left, top, left, top]
diff --git a/torchvision/transforms/v2/functional/_meta.py b/torchvision/transforms/v2/functional/_meta.py
index 8ffa3966195..56228348855 100644
--- a/torchvision/transforms/v2/functional/_meta.py
+++ b/torchvision/transforms/v2/functional/_meta.py
@@ -189,7 +189,6 @@ def _xyxy_to_cxcywh(xyxy: torch.Tensor, inplace: bool) -> torch.Tensor:
 def _convert_format_bounding_box(
     bounding_box: torch.Tensor, old_format: BoundingBoxFormat, new_format: BoundingBoxFormat, inplace: bool = False
 ) -> torch.Tensor:
-
     if new_format == old_format:
         return bounding_box

diff --git a/torchvision/utils.py b/torchvision/utils.py
index 1418656a7f2..906bbee4a04 100644
--- a/torchvision/utils.py
+++ b/torchvision/utils.py
@@ -161,7 +161,6 @@ def draw_bounding_boxes(
     font: Optional[str] = None,
     font_size: Optional[int] = None,
 ) -> torch.Tensor:
-
     """
     Draws bounding boxes on given image.
     The values of the input image should be uint8 between 0 and 255.
@@ -259,7 +258,6 @@ def draw_segmentation_masks(
     alpha: float = 0.8,
     colors: Optional[Union[List[Union[str, Tuple[int, int, int]]], str, Tuple[int, int, int]]] = None,
 ) -> torch.Tensor:
-
     """
     Draws segmentation masks on given RGB image.
     The values of the input image should be uint8 between 0 and 255.
@@ -324,7 +322,6 @@ def draw_keypoints(
     radius: int = 2,
     width: int = 3,
 ) -> torch.Tensor:
-
     """
     Draws Keypoints on given RGB image.
     The values of the input image should be uint8 between 0 and 255.
@@ -390,7 +387,6 @@ def draw_keypoints(
 # Flow visualization code adapted from https://github.com/tomrunia/OpticalFlow_Visualization
 @torch.no_grad()
 def flow_to_image(flow: torch.Tensor) -> torch.Tensor:
-
     """
     Converts a flow to an RGB image.

@@ -424,7 +420,6 @@ def flow_to_image(flow: torch.Tensor) -> torch.Tensor:

 @torch.no_grad()
 def _normalized_flow_to_image(normalized_flow: torch.Tensor) -> torch.Tensor:
-
     """
     Converts a batch of normalized flow to an RGB image.

@@ -553,7 +548,6 @@ def _parse_colors(


 def _log_api_usage_once(obj: Any) -> None:
-
     """
     Logs API usage(module and name) within an organization.
     In a large ecosystem, it's often useful to track the PyTorch and

From 4cf35b38e332c8b1225bc8e93be1a67df7cf8d87 Mon Sep 17 00:00:00 2001
From: Philip Meier
Date: Thu, 29 Jun 2023 11:13:36 +0200
Subject: [PATCH 2/2] match ufmt / black / usort with pyfmt

---
 .pre-commit-config.yaml | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index a73d2b28d1e..8707679e899 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -11,12 +11,12 @@ repos:
       - id: end-of-file-fixer

   - repo: https://github.com/omnilib/ufmt
-    rev: v2.1.0
+    rev: v2.0.1
     hooks:
       - id: ufmt
         additional_dependencies:
-          - black == 23.3.0
-          - usort == 1.0.7
+          - black == 22.12.0
+          - usort == 1.0.5

   - repo: https://github.com/PyCQA/flake8
     rev: 6.0.0
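The recurring Python change in [PATCH 1/2] above is the same two-step edit applied file after file: TorchScript-era `# type:` comments are replaced by inline annotations, and the blank line that the older black/ufmt versions tolerated after a multi-line signature is dropped. The sketch below only illustrates that pattern under assumed names; `select_samples` and its parameters are hypothetical and do not appear in the patch.

from typing import Dict, List, Optional, Tuple

from torch import Tensor


# Old style removed by the patch (kept as a comment so this sketch stays valid Python):
#
#     def select_samples(
#         proposals,       # type: List[Tensor]
#         targets=None,    # type: Optional[List[Dict[str, Tensor]]]
#     ):
#         # type: (...) -> Tuple[List[Tensor], List[Tensor]]
#
#         ...


# New style, as applied throughout the patch: inline annotations, no blank line after the signature.
def select_samples(
    proposals: List[Tensor],
    targets: Optional[List[Dict[str, Tensor]]] = None,
) -> Tuple[List[Tensor], List[Tensor]]:
    # Stand-in body: return the proposals unchanged plus any label tensors.
    return proposals, [t["labels"] for t in (targets or [])]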