From c4fc01b5bcb56f326b45319fa7001e72fb24bbb0 Mon Sep 17 00:00:00 2001
From: Philip Meier
Date: Thu, 20 May 2021 14:09:02 +0200
Subject: [PATCH 01/16] adopt `torch.testing.assert_close` in test suite

---
 test/_assert_utils.py                      |  10 ++
 test/common_utils.py                       |   7 +-
 test/test_datasets_samplers.py             |  21 +--
 test/test_datasets_video_utils.py          |  31 ++--
 test/test_functional_tensor.py             |  82 +++++-----
 test/test_hub.py                           |  10 +-
 test/test_image.py                         |  18 +--
 test/test_io.py                            |  26 ++--
 test/test_models.py                        |   6 +-
 test/test_models_detection_anchor_utils.py |   9 +-
 test/test_models_detection_utils.py        |   5 +-
 test/test_onnx.py                          |   9 +-
 test/test_ops.py                           |  82 ++++------
 test/test_transforms.py                    | 171 +++++++++++----------
 test/test_transforms_tensor.py             |  29 ++--
 test/test_transforms_video.py              |   3 +-
 test/test_utils.py                         |  38 +++--
 test/test_video_reader.py                  |  13 +-
 18 files changed, 284 insertions(+), 286 deletions(-)
 create mode 100644 test/_assert_utils.py

diff --git a/test/_assert_utils.py b/test/_assert_utils.py
new file mode 100644
index 00000000000..86273555657
--- /dev/null
+++ b/test/_assert_utils.py
@@ -0,0 +1,10 @@
+"""This is a temporary module and should be removed as soon as torch.testing.assert_equal is supported."""
+
+import functools
+
+import torch.testing
+
+__all__ = ["assert_equal"]
+
+
+assert_equal = functools.partial(torch.testing.assert_close, rtol=0, atol=0)
diff --git a/test/common_utils.py b/test/common_utils.py
index 1f48e8c649b..cc7d9c611b8 100644
--- a/test/common_utils.py
+++ b/test/common_utils.py
@@ -20,6 +20,8 @@
 import numpy as np
 from PIL import Image
 
+from _assert_utils import assert_equal
+
 IS_PY39 = sys.version_info.major == 3 and sys.version_info.minor == 9
 PY39_SEGFAULT_SKIP_MSG = "Segmentation fault with Python 3.9, see https://github.com/pytorch/vision/issues/3367"
 PY39_SKIP = unittest.skipIf(IS_PY39, PY39_SEGFAULT_SKIP_MSG)
@@ -139,7 +141,8 @@ def assertExpected(self, output, name, prec=None):
                 raise RuntimeError("The output for {}, is larger than 50kb".format(filename))
             else:
                 expected = torch.load(expected_file)
-                self.assertEqual(output, expected, prec=prec)
+                rtol = atol = prec or self.precision
+                torch.testing.assert_close(output, expected, rtol=rtol, atol=atol)
 
     def assertEqual(self, x, y, prec=None, message='', allow_inf=False):
         """
@@ -345,7 +348,7 @@ def compareTensorToPIL(self, tensor, pil_image, msg=None):
         pil_tensor = torch.as_tensor(np_pil_image.transpose((2, 0, 1)))
         if msg is None:
             msg = "tensor:\n{} \ndid not equal PIL tensor:\n{}".format(tensor, pil_tensor)
-        self.assertTrue(tensor.cpu().equal(pil_tensor), msg)
+        assert_equal(tensor.cpu(), pil_tensor, msg=msg)
diff --git a/test/test_datasets_samplers.py b/test/test_datasets_samplers.py
index e76f4f9d007..10d8704dbb1 100644
--- a/test/test_datasets_samplers.py
+++ b/test/test_datasets_samplers.py
@@ -14,6 +14,7 @@
 from torchvision import get_video_backend
 
 from common_utils import get_tmp_dir
+from _assert_utils import assert_equal
 
 
 @contextlib.contextmanager
@@ -47,8 +48,8 @@ def test_random_clip_sampler(self):
             indices = torch.tensor(list(iter(sampler)))
             videos = torch.div(indices, 5, rounding_mode='floor')
             v_idxs, count = torch.unique(videos, return_counts=True)
-            self.assertTrue(v_idxs.equal(torch.tensor([0, 1, 2])))
-            self.assertTrue(count.equal(torch.tensor([3, 3, 3])))
+            assert_equal(v_idxs, torch.tensor([0, 1, 2]))
+            assert_equal(count, torch.tensor([3, 3, 3]))
 
     def test_random_clip_sampler_unequal(self):
         with get_list_of_videos(num_videos=3, sizes=[10, 25, 25]) as video_list:
@@ -64,8 +65,8 @@ def test_random_clip_sampler_unequal(self):
             indices = torch.tensor(indices) - 2
             videos = torch.div(indices, 5, rounding_mode='floor')
             v_idxs, count = torch.unique(videos, return_counts=True)
-            self.assertTrue(v_idxs.equal(torch.tensor([0, 1])))
-            self.assertTrue(count.equal(torch.tensor([3, 3])))
+            assert_equal(v_idxs, torch.tensor([0, 1]))
+            assert_equal(count, torch.tensor([3, 3]))
 
     def test_uniform_clip_sampler(self):
         with get_list_of_videos(num_videos=3, sizes=[25, 25, 25]) as video_list:
@@ -75,9 +76,9 @@ def test_uniform_clip_sampler(self):
             indices = torch.tensor(list(iter(sampler)))
             videos = torch.div(indices, 5, rounding_mode='floor')
             v_idxs, count = torch.unique(videos, return_counts=True)
-            self.assertTrue(v_idxs.equal(torch.tensor([0, 1, 2])))
-            self.assertTrue(count.equal(torch.tensor([3, 3, 3])))
-            self.assertTrue(indices.equal(torch.tensor([0, 2, 4, 5, 7, 9, 10, 12, 14])))
+            assert_equal(v_idxs, torch.tensor([0, 1, 2]))
+            assert_equal(count, torch.tensor([3, 3, 3]))
+            assert_equal(indices, torch.tensor([0, 2, 4, 5, 7, 9, 10, 12, 14]))
 
     def test_uniform_clip_sampler_insufficient_clips(self):
         with get_list_of_videos(num_videos=3, sizes=[10, 25, 25]) as video_list:
@@ -85,7 +86,7 @@ def test_uniform_clip_sampler_insufficient_clips(self):
             sampler = UniformClipSampler(video_clips, 3)
             self.assertEqual(len(sampler), 3 * 3)
             indices = torch.tensor(list(iter(sampler)))
-            self.assertTrue(indices.equal(torch.tensor([0, 0, 1, 2, 4, 6, 7, 9, 11])))
+            assert_equal(indices, torch.tensor([0, 0, 1, 2, 4, 6, 7, 9, 11]))
 
     def test_distributed_sampler_and_uniform_clip_sampler(self):
         with get_list_of_videos(num_videos=3, sizes=[25, 25, 25]) as video_list:
@@ -100,7 +101,7 @@ def test_distributed_sampler_and_uniform_clip_sampler(self):
             )
             indices = torch.tensor(list(iter(distributed_sampler_rank0)))
             self.assertEqual(len(distributed_sampler_rank0), 6)
-            self.assertTrue(indices.equal(torch.tensor([0, 2, 4, 10, 12, 14])))
+            assert_equal(indices, torch.tensor([0, 2, 4, 10, 12, 14]))
 
             distributed_sampler_rank1 = DistributedSampler(
                 clip_sampler,
@@ -110,7 +111,7 @@ def test_distributed_sampler_and_uniform_clip_sampler(self):
             )
             indices = torch.tensor(list(iter(distributed_sampler_rank1)))
             self.assertEqual(len(distributed_sampler_rank1), 6)
-            self.assertTrue(indices.equal(torch.tensor([5, 7, 9, 0, 2, 4])))
+            assert_equal(indices, torch.tensor([5, 7, 9, 0, 2, 4]))
 
 
 if __name__ == '__main__':
diff --git a/test/test_datasets_video_utils.py b/test/test_datasets_video_utils.py
index 694214544f7..9cd3b775934 100644
--- a/test/test_datasets_video_utils.py
+++ b/test/test_datasets_video_utils.py
@@ -7,6 +7,7 @@
 from torchvision.datasets.video_utils import VideoClips, unfold
 
 from common_utils import get_tmp_dir
+from _assert_utils import assert_equal
 
 
 @contextlib.contextmanager
@@ -40,7 +41,7 @@ def test_unfold(self):
             [0, 1, 2],
             [3, 4, 5],
         ])
-        self.assertTrue(r.equal(expected))
+        assert_equal(r, expected)
 
         r = unfold(a, 3, 2, 1)
         expected = torch.tensor([
@@ -48,14 +49,14 @@ def test_unfold(self):
             [2, 3, 4],
             [4, 5, 6]
         ])
-        self.assertTrue(r.equal(expected))
+        assert_equal(r, expected)
 
         r = unfold(a, 3, 2, 2)
         expected = torch.tensor([
             [0, 2, 4],
             [2, 4, 6],
         ])
-        self.assertTrue(r.equal(expected))
+        assert_equal(r, expected)
 
     @unittest.skipIf(not io.video._av_available(), "this test requires av")
     def test_video_clips(self):
         self.assertEqual(video_clips.num_clips(), 1 + 2 + 3)
         for i, (v_idx, c_idx) in enumerate([(0, 0), (1, 0), (1, 1), (2, 0), (2, 1), (2, 2)]):
             video_idx, clip_idx = video_clips.get_clip_location(i)
-            self.assertEqual(video_idx, v_idx)
-            self.assertEqual(clip_idx, c_idx)
+            assert_equal(video_idx, v_idx)
+            assert_equal(clip_idx, c_idx)
 
         video_clips = VideoClips(video_list, 6, 6)
         self.assertEqual(video_clips.num_clips(), 0 + 1 + 2)
         for i, (v_idx, c_idx) in enumerate([(1, 0), (2, 0), (2, 1)]):
             video_idx, clip_idx = video_clips.get_clip_location(i)
-            self.assertEqual(video_idx, v_idx)
-            self.assertEqual(clip_idx, c_idx)
+            assert_equal(video_idx, v_idx)
+            assert_equal(clip_idx, c_idx)
 
         video_clips = VideoClips(video_list, 6, 1)
         self.assertEqual(video_clips.num_clips(), 0 + (10 - 6 + 1) + (15 - 6 + 1))
         for i, v_idx, c_idx in [(0, 1, 0), (4, 1, 4), (5, 2, 0), (6, 2, 1)]:
             video_idx, clip_idx = video_clips.get_clip_location(i)
-            self.assertEqual(video_idx, v_idx)
-            self.assertEqual(clip_idx, c_idx)
+            assert_equal(video_idx, v_idx)
+            assert_equal(clip_idx, c_idx)
 
     @unittest.skipIf(not io.video._av_available(), "this test requires av")
     def test_video_clips_custom_fps(self):
@@ -89,8 +90,8 @@ def test_video_clips_custom_fps(self):
             video_clips = VideoClips(video_list, num_frames, num_frames, fps, num_workers=2)
             for i in range(video_clips.num_clips()):
                 video, audio, info, video_idx = video_clips.get_clip(i)
-                self.assertEqual(video.shape[0], num_frames)
-                self.assertEqual(info["video_fps"], fps)
+                assert_equal(video.shape[0], num_frames)
+                assert_equal(info["video_fps"], fps)
                 # TODO add tests checking that the content is right
 
     def test_compute_clips_for_video(self):
@@ -104,8 +105,8 @@ def test_compute_clips_for_video(self):
                                                          orig_fps, new_fps)
         resampled_idxs = VideoClips._resample_video_idx(int(duration * new_fps), orig_fps, new_fps)
         self.assertEqual(len(clips), 1)
-        self.assertTrue(clips.equal(idxs))
-        self.assertTrue(idxs[0].equal(resampled_idxs))
+        assert_equal(clips, idxs)
+        assert_equal(idxs[0], resampled_idxs)
 
         # case 2: all frames appear only once
         num_frames = 4
@@ -116,8 +117,8 @@ def test_compute_clips_for_video(self):
                                                          orig_fps, new_fps)
         resampled_idxs = VideoClips._resample_video_idx(int(duration * new_fps), orig_fps, new_fps)
         self.assertEqual(len(clips), 3)
-        self.assertTrue(clips.equal(idxs))
-        self.assertTrue(idxs.flatten().equal(resampled_idxs))
+        assert_equal(clips, idxs)
+        assert_equal(idxs.flatten(), resampled_idxs)
 
         # case 3: frames aren't enough for a clip
         num_frames = 32
diff --git a/test/test_functional_tensor.py b/test/test_functional_tensor.py
index 31a1c1a43e8..65c04c685eb 100644
--- a/test/test_functional_tensor.py
+++ b/test/test_functional_tensor.py
@@ -15,6 +15,7 @@
 from torchvision.transforms import InterpolationMode
 
 from common_utils import TransformsTester, cpu_and_gpu, needs_cuda
+from _assert_utils import assert_equal
 
 from typing import Dict, List, Sequence, Tuple
 
@@ -39,13 +40,13 @@ def _test_fn_on_batch(self, batch_tensors, fn, scripted_fn_atol=1e-8, **fn_kwarg
         for i in range(len(batch_tensors)):
             img_tensor = batch_tensors[i, ...]
            transformed_img = fn(img_tensor, **fn_kwargs)
-            self.assertTrue(transformed_img.equal(transformed_batch[i, ...]))
+            assert_equal(transformed_img, transformed_batch[i, ...])
 
         if scripted_fn_atol >= 0:
             scripted_fn = torch.jit.script(fn)
             # scriptable function test
             s_transformed_batch = scripted_fn(batch_tensors, **fn_kwargs)
-            self.assertTrue(transformed_batch.allclose(s_transformed_batch, atol=scripted_fn_atol))
+            torch.testing.assert_close(transformed_batch, s_transformed_batch, rtol=1e-5, atol=scripted_fn_atol)
 
     def test_assert_image_tensor(self):
         shape = (100,)
@@ -79,7 +80,7 @@ def test_vflip(self):
 
         # scriptable function test
         vflipped_img_script = script_vflip(img_tensor)
-        self.assertTrue(vflipped_img.equal(vflipped_img_script))
+        assert_equal(vflipped_img, vflipped_img_script)
 
         batch_tensors = self._create_data_batch(16, 18, num_samples=4, device=self.device)
         self._test_fn_on_batch(batch_tensors, F.vflip)
@@ -94,7 +95,7 @@ def test_hflip(self):
 
         # scriptable function test
         hflipped_img_script = script_hflip(img_tensor)
-        self.assertTrue(hflipped_img.equal(hflipped_img_script))
+        assert_equal(hflipped_img, hflipped_img_script)
 
         batch_tensors = self._create_data_batch(16, 18, num_samples=4, device=self.device)
         self._test_fn_on_batch(batch_tensors, F.hflip)
@@ -140,11 +141,10 @@ def test_hsv2rgb(self):
             for h1, s1, v1 in zip(h, s, v):
                 rgb.append(colorsys.hsv_to_rgb(h1, s1, v1))
             colorsys_img = torch.tensor(rgb, dtype=torch.float32, device=self.device)
-            max_diff = (ft_img - colorsys_img).abs().max()
-            self.assertLess(max_diff, 1e-5)
+            torch.testing.assert_close(ft_img, colorsys_img, rtol=0.0, atol=1e-5)
 
         s_rgb_img = scripted_fn(hsv_img)
-        self.assertTrue(rgb_img.allclose(s_rgb_img))
+        torch.testing.assert_close(rgb_img, s_rgb_img)
 
         batch_tensors = self._create_data_batch(120, 100, num_samples=4, device=self.device).float()
         self._test_fn_on_batch(batch_tensors, F_t._hsv2rgb)
@@ -177,7 +177,7 @@ def test_rgb2hsv(self):
             self.assertLess(max_diff, 1e-5)
 
         s_hsv_img = scripted_fn(rgb_img)
-        self.assertTrue(hsv_img.allclose(s_hsv_img, atol=1e-7))
+        torch.testing.assert_close(hsv_img, s_hsv_img, rtol=1e-5, atol=1e-7)
 
         batch_tensors = self._create_data_batch(120, 100, num_samples=4, device=self.device).float()
         self._test_fn_on_batch(batch_tensors, F_t._rgb2hsv)
@@ -194,7 +194,7 @@ def test_rgb_to_grayscale(self):
             self.approxEqualTensorToPIL(gray_tensor.float(), gray_pil_image, tol=1.0 + 1e-10, agg_method="max")
 
             s_gray_tensor = script_rgb_to_grayscale(img_tensor, num_output_channels=num_output_channels)
-            self.assertTrue(s_gray_tensor.equal(gray_tensor))
+            assert_equal(s_gray_tensor, gray_tensor)
 
             batch_tensors = self._create_data_batch(16, 18, num_samples=4, device=self.device)
             self._test_fn_on_batch(batch_tensors, F.rgb_to_grayscale, num_output_channels=num_output_channels)
@@ -240,12 +240,12 @@ def test_five_crop(self):
             for j in range(len(tuple_transformed_imgs)):
                 true_transformed_img = tuple_transformed_imgs[j]
                 transformed_img = tuple_transformed_batches[j][i, ...]
-                self.assertTrue(true_transformed_img.equal(transformed_img))
+                assert_equal(true_transformed_img, transformed_img)
 
         # scriptable function test
         s_tuple_transformed_batches = script_five_crop(batch_tensors, [10, 11])
         for transformed_batch, s_transformed_batch in zip(tuple_transformed_batches, s_tuple_transformed_batches):
-            self.assertTrue(transformed_batch.equal(s_transformed_batch))
+            assert_equal(transformed_batch, s_transformed_batch)
 
     def test_ten_crop(self):
         script_ten_crop = torch.jit.script(F.ten_crop)
@@ -272,12 +272,12 @@ def test_ten_crop(self):
             for j in range(len(tuple_transformed_imgs)):
                 true_transformed_img = tuple_transformed_imgs[j]
                 transformed_img = tuple_transformed_batches[j][i, ...]
-                self.assertTrue(true_transformed_img.equal(transformed_img))
+                assert_equal(true_transformed_img, transformed_img)
 
         # scriptable function test
         s_tuple_transformed_batches = script_ten_crop(batch_tensors, [10, 11])
         for transformed_batch, s_transformed_batch in zip(tuple_transformed_batches, s_tuple_transformed_batches):
-            self.assertTrue(transformed_batch.equal(s_transformed_batch))
+            assert_equal(transformed_batch, s_transformed_batch)
 
     def test_pad(self):
         script_fn = torch.jit.script(F.pad)
@@ -320,7 +320,7 @@ def test_pad(self):
                 else:
                     script_pad = pad
                 pad_tensor_script = script_fn(tensor, script_pad, **kwargs)
-                self.assertTrue(pad_tensor.equal(pad_tensor_script), msg="{}, {}".format(pad, kwargs))
+                assert_equal(pad_tensor, pad_tensor_script, msg="{}, {}".format(pad, kwargs))
 
                 self._test_fn_on_batch(batch_tensors, F.pad, padding=script_pad, **kwargs)
 
@@ -348,9 +348,10 @@ def test_resize(self):
                     resized_tensor = F.resize(tensor, size=size, interpolation=interpolation, max_size=max_size)
                     resized_pil_img = F.resize(pil_img, size=size, interpolation=interpolation, max_size=max_size)
 
-                    self.assertEqual(
-                        resized_tensor.size()[1:], resized_pil_img.size[::-1],
-                        msg="{}, {}".format(size, interpolation)
+                    assert_equal(
+                        resized_tensor.size()[1:],
+                        resized_pil_img.size[::-1],
+                        msg="{}, {}".format(size, interpolation),
                     )
 
                     if interpolation not in [NEAREST, ]:
@@ -374,7 +375,7 @@ def test_resize(self):
 
                         resize_result = script_fn(tensor, size=script_size, interpolation=interpolation,
                                                   max_size=max_size)
-                        self.assertTrue(resized_tensor.equal(resize_result), msg="{}, {}".format(size, interpolation))
+                        assert_equal(resized_tensor, resize_result, msg="{}, {}".format(size, interpolation))
 
                     self._test_fn_on_batch(
                         batch_tensors, F.resize, size=script_size, interpolation=interpolation, max_size=max_size
@@ -384,7 +385,7 @@ def test_resize(self):
         with self.assertWarnsRegex(UserWarning, r"Argument interpolation should be of type InterpolationMode"):
             res1 = F.resize(tensor, size=32, interpolation=2)
             res2 = F.resize(tensor, size=32, interpolation=BILINEAR)
-            self.assertTrue(res1.equal(res2))
+            assert_equal(res1, res2)
 
         for img in (tensor, pil_img):
             exp_msg = "max_size should only be passed if size specifies the length of the smaller edge"
@@ -400,15 +401,16 @@ def test_resized_crop(self):
            for mode in [NEAREST, BILINEAR, BICUBIC]:
                 out_tensor = F.resized_crop(tensor, top=0, left=0, height=26, width=36, size=[26, 36],
                                             interpolation=mode)
-                self.assertTrue(tensor.equal(out_tensor), msg="{} vs {}".format(out_tensor[0, :5, :5], tensor[0, :5, :5]))
+                assert_equal(tensor, out_tensor, msg="{} vs {}".format(out_tensor[0, :5, :5], tensor[0, :5, :5]))
 
         # 2) resize by half and crop a TL corner
         tensor, _ = self._create_data(26, 36, device=self.device)
        out_tensor = F.resized_crop(tensor, top=0, left=0, height=20, width=30, size=[10, 15], interpolation=NEAREST)
         expected_out_tensor = tensor[:, :20:2, :30:2]
-        self.assertTrue(
-            expected_out_tensor.equal(out_tensor),
-            msg="{} vs {}".format(expected_out_tensor[0, :10, :10], out_tensor[0, :10, :10])
+        assert_equal(
+            expected_out_tensor,
+            out_tensor,
+            msg="{} vs {}".format(expected_out_tensor[0, :10, :10], out_tensor[0, :10, :10]),
         )
 
         batch_tensors = self._create_data_batch(26, 36, num_samples=4, device=self.device)
@@ -420,15 +422,11 @@ def _test_affine_identity_map(self, tensor, scripted_affine):
         # 1) identity map
         out_tensor = F.affine(tensor, angle=0, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], interpolation=NEAREST)
 
-        self.assertTrue(
-            tensor.equal(out_tensor), msg="{} vs {}".format(out_tensor[0, :5, :5], tensor[0, :5, :5])
-        )
+        assert_equal(tensor, out_tensor, msg="{} vs {}".format(out_tensor[0, :5, :5], tensor[0, :5, :5]))
         out_tensor = scripted_affine(
             tensor, angle=0, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], interpolation=NEAREST
         )
-        self.assertTrue(
-            tensor.equal(out_tensor), msg="{} vs {}".format(out_tensor[0, :5, :5], tensor[0, :5, :5])
-        )
+        assert_equal(tensor, out_tensor, msg="{} vs {}".format(out_tensor[0, :5, :5], tensor[0, :5, :5]))
 
     def _test_affine_square_rotations(self, tensor, pil_img, scripted_affine):
         # 2) Test rotation
@@ -452,9 +450,10 @@ def _test_affine_square_rotations(self, tensor, pil_img, scripted_affine):
                 tensor, angle=a, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], interpolation=NEAREST
             )
             if true_tensor is not None:
-                self.assertTrue(
-                    true_tensor.equal(out_tensor),
-                    msg="{}\n{} vs \n{}".format(a, out_tensor[0, :5, :5], true_tensor[0, :5, :5])
+                assert_equal(
+                    true_tensor,
+                    out_tensor,
+                    msg="{}\n{} vs \n{}".format(a, out_tensor[0, :5, :5], true_tensor[0, :5, :5]),
                 )
 
             if out_tensor.dtype != torch.uint8:
@@ -593,18 +592,18 @@ def test_affine(self):
         with self.assertWarnsRegex(UserWarning, r"Argument resample is deprecated and will be removed"):
             res1 = F.affine(tensor, 45, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], resample=2)
             res2 = F.affine(tensor, 45, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], interpolation=BILINEAR)
-            self.assertTrue(res1.equal(res2))
+            assert_equal(res1, res2)
 
         # assert changed type warning
         with self.assertWarnsRegex(UserWarning, r"Argument interpolation should be of type InterpolationMode"):
             res1 = F.affine(tensor, 45, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], interpolation=2)
             res2 = F.affine(tensor, 45, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], interpolation=BILINEAR)
-            self.assertTrue(res1.equal(res2))
+            assert_equal(res1, res2)
 
         with self.assertWarnsRegex(UserWarning, r"Argument fillcolor is deprecated and will be removed"):
             res1 = F.affine(pil_img, 45, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], fillcolor=10)
             res2 = F.affine(pil_img, 45, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], fill=10)
-            self.assertEqual(res1, res2)
+            assert_equal(res1, res2)
 
     def _test_rotate_all_options(self, tensor, pil_img, scripted_rotate, centers):
         img_size = pil_img.size
@@ -682,13 +681,13 @@ def test_rotate(self):
         with self.assertWarnsRegex(UserWarning, r"Argument resample is deprecated and will be removed"):
             res1 = F.rotate(tensor, 45, resample=2)
             res2 = F.rotate(tensor, 45, interpolation=BILINEAR)
-            self.assertTrue(res1.equal(res2))
+            assert_equal(res1, res2)
 
         # assert changed type warning
         with self.assertWarnsRegex(UserWarning, r"Argument interpolation should be of type InterpolationMode"):
             res1 = F.rotate(tensor, 45, interpolation=2)
             res2 = F.rotate(tensor, 45, interpolation=BILINEAR)
-            self.assertTrue(res1.equal(res2))
+            assert_equal(res1, res2)
 
     def test_gaussian_blur(self):
         small_image_tensor = torch.from_numpy(
@@ -747,11 +746,8 @@ def test_gaussian_blur(self):
 
             for fn in [F.gaussian_blur, scripted_transform]:
                 out = fn(tensor, kernel_size=ksize, sigma=sigma)
-                self.assertEqual(true_out.shape, out.shape, msg="{}, {}".format(ksize, sigma))
-                self.assertLessEqual(
-                    torch.max(true_out.float() - out.float()),
-                    1.0,
-                    msg="{}, {}".format(ksize, sigma)
+                torch.testing.assert_close(
+                    out, true_out, rtol=0.0, atol=1.0, msg="{}, {}".format(ksize, sigma)
                 )
 
 
@@ -771,7 +767,7 @@ def test_scale_channel(self):
         img_chan = torch.randint(0, 256, size=size).to('cpu')
         scaled_cpu = F_t._scale_channel(img_chan)
         scaled_cuda = F_t._scale_channel(img_chan.to('cuda'))
-        self.assertTrue(scaled_cpu.equal(scaled_cuda.to('cpu')))
+        assert_equal(scaled_cpu, scaled_cuda.to('cpu'))
 
 
 def _get_data_dims_and_points_for_perspective():
diff --git a/test/test_hub.py b/test/test_hub.py
index 29ae90014d1..7050baaab4c 100644
--- a/test/test_hub.py
+++ b/test/test_hub.py
@@ -5,6 +5,8 @@
 import sys
 import unittest
 
+import torch.testing
+
 
 def sum_of_model_parameters(model):
     s = 0
@@ -31,9 +33,7 @@ def test_load_from_github(self):
             'resnet18',
             pretrained=True,
             progress=False)
-        self.assertAlmostEqual(sum_of_model_parameters(hub_model).item(),
-                               SUM_OF_PRETRAINED_RESNET18_PARAMS,
-                               places=2)
+        torch.testing.assert_close(sum_of_model_parameters(hub_model).item(), SUM_OF_PRETRAINED_RESNET18_PARAMS)
 
     def test_set_dir(self):
         temp_dir = tempfile.gettempdir()
@@ -43,9 +43,7 @@ def test_set_dir(self):
             'resnet18',
             pretrained=True,
             progress=False)
-        self.assertAlmostEqual(sum_of_model_parameters(hub_model).item(),
-                               SUM_OF_PRETRAINED_RESNET18_PARAMS,
-                               places=2)
+        torch.testing.assert_close(sum_of_model_parameters(hub_model).item(), SUM_OF_PRETRAINED_RESNET18_PARAMS)
         self.assertTrue(os.path.exists(temp_dir + '/pytorch_vision_master'))
         shutil.rmtree(temp_dir + '/pytorch_vision_master')
 
diff --git a/test/test_image.py b/test/test_image.py
index 11c8f3d7a03..da1f6a636eb 100644
--- a/test/test_image.py
+++ b/test/test_image.py
@@ -8,6 +8,7 @@
 import torch
 from PIL import Image
 from common_utils import get_tmp_dir, needs_cuda
+from _assert_utils import assert_equal
 
 from torchvision.io.image import (
     decode_png, decode_jpeg, encode_jpeg, write_jpeg, decode_image, read_file,
@@ -64,8 +65,7 @@ def test_decode_jpeg(self):
             # Permit a small variation on pixel values to account for implementation
             # differences between Pillow and LibJPEG.
-            abs_mean_diff = (img_ljpeg.type(torch.float32) - img_pil).abs().mean().item()
-            self.assertTrue(abs_mean_diff < 2)
+            torch.testing.assert_close(img_ljpeg, img_pil, rtol=0.0, atol=2.0)
 
         with self.assertRaisesRegex(RuntimeError, "Expected a non empty 1-dimensional tensor"):
             decode_jpeg(torch.empty((100, 1), dtype=torch.uint8))
@@ -107,7 +107,7 @@ def test_encode_jpeg(self):
                 for src_img in [img, img.contiguous()]:
                     # PIL sets jpeg quality to 75 by default
                     jpeg_bytes = encode_jpeg(src_img, quality=75)
-                    self.assertTrue(jpeg_bytes.equal(pil_bytes))
+                    assert_equal(jpeg_bytes, pil_bytes)
 
         with self.assertRaisesRegex(
                 RuntimeError, "Input tensor dtype should be uint8"):
@@ -191,7 +191,7 @@ def test_encode_png(self):
                 rec_img = torch.from_numpy(np.array(rec_img))
                 rec_img = rec_img.permute(2, 0, 1)
 
-                self.assertTrue(img_pil.equal(rec_img))
+                assert_equal(img_pil, rec_img)
 
         with self.assertRaisesRegex(
                 RuntimeError, "Input tensor dtype should be uint8"):
@@ -224,7 +224,7 @@ def test_write_png(self):
                 saved_image = torch.from_numpy(np.array(Image.open(torch_png)))
                 saved_image = saved_image.permute(2, 0, 1)
 
-                self.assertTrue(img_pil.equal(saved_image))
+                assert_equal(img_pil, saved_image)
 
     def test_read_file(self):
         with get_tmp_dir() as d:
@@ -235,7 +235,7 @@ def test_read_file(self):
 
             data = read_file(fpath)
             expected = torch.tensor(list(content), dtype=torch.uint8)
-            self.assertTrue(data.equal(expected))
+            assert_equal(data, expected)
             os.unlink(fpath)
 
         with self.assertRaisesRegex(
@@ -251,7 +251,7 @@ def test_read_file_non_ascii(self):
 
             data = read_file(fpath)
             expected = torch.tensor(list(content), dtype=torch.uint8)
-            self.assertTrue(data.equal(expected))
+            assert_equal(data, expected)
             os.unlink(fpath)
 
     def test_write_file(self):
@@ -290,10 +290,10 @@ def test_decode_jpeg_cuda(mode, img_path, scripted):
     data = read_file(img_path)
     img = decode_image(data, mode=mode)
     f = torch.jit.script(decode_jpeg) if scripted else decode_jpeg
-    img_nvjpeg = f(data, mode=mode, device='cuda')
+    img_nvjpeg = f(data, mode=mode, device='cuda').cpu()
 
     # Some difference expected between jpeg implementations
-    tester.assertTrue((img.float() - img_nvjpeg.cpu().float()).abs().mean() < 2)
+    torch.testing.assert_close(img, img_nvjpeg, rtol=0.0, atol=2.0, check_stride=False)
 
 
 @needs_cuda
diff --git a/test/test_io.py b/test/test_io.py
index 7d752bdbcf7..e86ea9e84fc 100644
--- a/test/test_io.py
+++ b/test/test_io.py
@@ -10,6 +10,7 @@
 from urllib.error import URLError
 
 from common_utils import get_tmp_dir
+from _assert_utils import assert_equal
 
 
 try:
@@ -74,7 +75,7 @@ class TestIO(unittest.TestCase):
     def test_write_read_video(self):
         with temp_video(10, 300, 300, 5, lossless=True) as (f_name, data):
             lv, _, info = io.read_video(f_name)
-            self.assertTrue(data.equal(lv))
+            assert_equal(data, lv)
             self.assertEqual(info["video_fps"], 5)
 
     @unittest.skipIf(not io._HAS_VIDEO_OPT, "video_reader backend is not chosen")
@@ -116,14 +117,14 @@ def test_read_partial_video(self):
                 lv, _, _ = io.read_video(f_name, pts[start], pts[start + offset - 1])
                 s_data = data[start:(start + offset)]
                 self.assertEqual(len(lv), offset)
-                self.assertTrue(s_data.equal(lv))
+                assert_equal(s_data, lv)
 
             if get_video_backend() == "pyav":
                 # for "video_reader" backend, we don't decode the closest early frame
                 # when the given start pts is not matching any frame pts
                 lv, _, _ = io.read_video(f_name, pts[4] + 1, pts[7])
                 self.assertEqual(len(lv), 4)
-                self.assertTrue(data[4:8].equal(lv))
+                assert_equal(data[4:8], lv)
 
     def test_read_partial_video_bframes(self):
         # do not use lossless encoding, to test the presence of B-frames
@@ -135,16 +136,16 @@ def test_read_partial_video_bframes(self):
                 lv, _, _ = io.read_video(f_name, pts[start], pts[start + offset - 1])
                 s_data = data[start:(start + offset)]
                 self.assertEqual(len(lv), offset)
-                self.assertTrue((s_data.float() - lv.float()).abs().max() < self.TOLERANCE)
+                torch.testing.assert_close(s_data, lv, rtol=0.0, atol=self.TOLERANCE)
 
             lv, _, _ = io.read_video(f_name, pts[4] + 1, pts[7])
             # TODO fix this
             if get_video_backend() == 'pyav':
                 self.assertEqual(len(lv), 4)
-                self.assertTrue((data[4:8].float() - lv.float()).abs().max() < self.TOLERANCE)
+                torch.testing.assert_close(data[4:8], lv, rtol=0.0, atol=self.TOLERANCE)
             else:
                 self.assertEqual(len(lv), 3)
-                self.assertTrue((data[5:8].float() - lv.float()).abs().max() < self.TOLERANCE)
+                torch.testing.assert_close(data[5:8], lv, rtol=0.0, atol=self.TOLERANCE)
 
     def test_read_packed_b_frames_divx_file(self):
         name = "hmdb51_Turnk_r_Pippi_Michel_cartwheel_f_cm_np2_le_med_6.avi"
@@ -175,7 +176,7 @@ def test_read_video_pts_unit_sec(self):
         with temp_video(10, 300, 300, 5, lossless=True) as (f_name, data):
             lv, _, info = io.read_video(f_name, pts_unit='sec')
 
-            self.assertTrue(data.equal(lv))
+            assert_equal(data, lv)
             self.assertEqual(info["video_fps"], 5)
             self.assertEqual(info, {"video_fps": 5})
 
@@ -201,7 +202,7 @@ def test_read_partial_video_pts_unit_sec(self):
                 lv, _, _ = io.read_video(f_name, pts[start], pts[start + offset - 1], pts_unit='sec')
                 s_data = data[start:(start + offset)]
                 self.assertEqual(len(lv), offset)
-                self.assertTrue(s_data.equal(lv))
+                assert_equal(s_data, lv)
 
             container = av.open(f_name)
             stream = container.streams[0]
@@ -212,7 +213,7 @@ def test_read_partial_video_pts_unit_sec(self):
                 # for "video_reader" backend, we don't decode the closest early frame
                 # when the given start pts is not matching any frame pts
                 self.assertEqual(len(lv), 4)
-                self.assertTrue(data[4:8].equal(lv))
+                assert_equal(data[4:8], lv)
             container.close()
 
     def test_read_video_corrupted_file(self):
@@ -251,9 +252,10 @@ def test_read_video_partially_corrupted_file(self):
             else:
                 self.assertEqual(len(video), 4)
             # but the valid decoded content is still correct
-            self.assertTrue(video[:3].equal(data[:3]))
+            assert_equal(video[:3], data[:3])
             # and the last few frames are wrong
-            self.assertFalse(video.equal(data))
+            with self.assertRaises(AssertionError):
+                assert_equal(video, data)
 
     @unittest.skipIf(sys.platform == 'win32', 'temporarily disabled on Windows')
     def test_write_video_with_audio(self):
@@ -278,7 +280,7 @@ def test_write_video_with_audio(self):
             )
 
             self.assertEqual(info["video_fps"], out_info["video_fps"])
-            self.assertTrue(video_tensor.equal(out_video_tensor))
+            assert_equal(video_tensor, out_video_tensor)
 
             audio_stream = av.open(f_name).streams.audio[0]
             out_audio_stream = av.open(out_f_name).streams.audio[0]
diff --git a/test/test_models.py b/test/test_models.py
index 401c4175ccf..a9e26a3f159 100644
--- a/test/test_models.py
+++ b/test/test_models.py
@@ -120,7 +120,7 @@ def check_out(out):
                 # predictions match.
                expected_file = self._get_expected_file(name)
                 expected = torch.load(expected_file)
-                self.assertEqual(out.argmax(dim=1), expected.argmax(dim=1), prec=prec)
+                torch.testing.assert_close(out.argmax(dim=1), expected.argmax(dim=1), rtol=prec, atol=prec)
                 return False  # Partial validation performed
 
             return True  # Full validation performed
@@ -205,7 +205,7 @@ def compute_mean_std(tensor):
                 # scores.
                 expected_file = self._get_expected_file(name)
                 expected = torch.load(expected_file)
-                self.assertEqual(output[0]["scores"], expected[0]["scores"], prec=prec)
+                torch.testing.assert_close(output[0]["scores"], expected[0]["scores"], rtol=prec, atol=prec)
 
             # Note: Fmassa proposed turning off NMS by adapting the threshold
             # and then using the Hungarian algorithm as in DETR to find the
@@ -304,7 +304,7 @@ def test_memory_efficient_densenet(self):
         max_diff = (out1 - out2).abs().max()
 
         self.assertTrue(num_params == num_grad)
-        self.assertTrue(max_diff < 1e-5)
+        torch.testing.assert_close(out1, out2, rtol=0.0, atol=1e-5)
 
     def test_resnet_dilation(self):
         # TODO improve tests to also check that each layer has the right dimensionality
diff --git a/test/test_models_detection_anchor_utils.py b/test/test_models_detection_anchor_utils.py
index ed1b06b5f96..13c399a0c32 100644
--- a/test/test_models_detection_anchor_utils.py
+++ b/test/test_models_detection_anchor_utils.py
@@ -1,5 +1,6 @@
 import torch
 from common_utils import TestCase
+from _assert_utils import assert_equal
 from torchvision.models.detection.anchor_utils import AnchorGenerator, DefaultBoxGenerator
 from torchvision.models.detection.image_list import ImageList
 
@@ -62,8 +63,8 @@ def test_anchor_generator(self):
         self.assertEqual(len(anchors), 2)
         self.assertEqual(tuple(anchors[0].shape), (9, 4))
         self.assertEqual(tuple(anchors[1].shape), (9, 4))
-        self.assertEqual(anchors[0], anchors_output)
-        self.assertEqual(anchors[1], anchors_output)
+        assert_equal(anchors[0], anchors_output)
+        assert_equal(anchors[1], anchors_output)
 
     def test_defaultbox_generator(self):
         images = torch.zeros(2, 3, 15, 15)
@@ -85,5 +86,5 @@ def test_defaultbox_generator(self):
         self.assertEqual(len(dboxes), 2)
         self.assertEqual(tuple(dboxes[0].shape), (4, 4))
         self.assertEqual(tuple(dboxes[1].shape), (4, 4))
-        self.assertTrue(dboxes[0].allclose(dboxes_output))
-        self.assertTrue(dboxes[1].allclose(dboxes_output))
+        torch.testing.assert_close(dboxes[0], dboxes_output, rtol=1e-5, atol=1e-8)
+        torch.testing.assert_close(dboxes[1], dboxes_output, rtol=1e-5, atol=1e-8)
diff --git a/test/test_models_detection_utils.py b/test/test_models_detection_utils.py
index f61d825e0d8..a20e0abc965 100644
--- a/test/test_models_detection_utils.py
+++ b/test/test_models_detection_utils.py
@@ -4,6 +4,7 @@
 from torchvision.models.detection.transform import GeneralizedRCNNTransform
 import unittest
 from torchvision.models.detection import backbone_utils
+from _assert_utils import assert_equal
 
 
 class Tester(unittest.TestCase):
@@ -55,8 +56,8 @@ def test_transform_copy_targets(self):
         targets = [{'boxes': torch.rand(3, 4)}, {'boxes': torch.rand(2, 4)}]
         targets_copy = copy.deepcopy(targets)
         out = transform(image, targets)  # noqa: F841
-        self.assertTrue(torch.equal(targets[0]['boxes'], targets_copy[0]['boxes']))
-        self.assertTrue(torch.equal(targets[1]['boxes'], targets_copy[1]['boxes']))
+        assert_equal(targets[0]['boxes'], targets_copy[0]['boxes'])
+        assert_equal(targets[1]['boxes'], targets_copy[1]['boxes'])
 
     def test_not_float_normalize(self):
         transform = GeneralizedRCNNTransform(300, 500, torch.zeros(3), torch.ones(3))
diff --git a/test/test_onnx.py b/test/test_onnx.py
index 63f182004b8..d0140c79dfc 100644
--- a/test/test_onnx.py
+++ b/test/test_onnx.py
@@ -7,6 +7,7 @@
     onnxruntime = None
 
 from common_utils import set_rng_seed
+from _assert_utils import assert_equal
 import io
 import torch
 from torchvision import ops
@@ -483,8 +484,8 @@ def test_heatmaps_to_keypoints(self):
        jit_trace = torch.jit.trace(heatmaps_to_keypoints, (maps, rois))
         out_trace = jit_trace(maps, rois)
 
-        assert torch.all(out[0].eq(out_trace[0]))
-        assert torch.all(out[1].eq(out_trace[1]))
+        assert_equal(out[0], out_trace[0])
+        assert_equal(out[1], out_trace[1])
 
         maps2 = torch.rand(20, 2, 21, 21)
         rois2 = torch.rand(20, 4)
@@ -492,8 +493,8 @@ def test_heatmaps_to_keypoints(self):
         out2 = heatmaps_to_keypoints(maps2, rois2)
         out_trace2 = jit_trace(maps2, rois2)
 
-        assert torch.all(out2[0].eq(out_trace2[0]))
-        assert torch.all(out2[1].eq(out_trace2[1]))
+        assert_equal(out2[0], out_trace2[0])
+        assert_equal(out2[1], out_trace2[1])
 
     def test_keypoint_rcnn(self):
         images, test_images = self.get_test_images()
diff --git a/test/test_ops.py b/test/test_ops.py
index 2e9fac8bc42..180ef571bba 100644
--- a/test/test_ops.py
+++ b/test/test_ops.py
@@ -1,4 +1,5 @@
 from common_utils import needs_cuda, cpu_only
+from _assert_utils import assert_equal
 import math
 import unittest
 import pytest
@@ -78,7 +79,7 @@ def _test_forward(self, device, contiguous, x_dtype=None, rois_dtype=None, **kwa
                                   sampling_ratio=-1, device=device, dtype=self.dtype, **kwargs)
 
         tol = 1e-3 if (x_dtype is torch.half or rois_dtype is torch.half) else 1e-5
-        self.assertTrue(torch.allclose(gt_y.to(y.dtype), y, rtol=tol, atol=tol))
+        torch.testing.assert_close(gt_y.to(y), y, rtol=tol, atol=tol)
 
     def _test_backward(self, device, contiguous):
         pool_size = 2
@@ -363,7 +365,7 @@ def make_rois(num_rois=1000):
                 abs_diff = torch.abs(qy[diff_idx].dequantize() - quantized_float_y[diff_idx].dequantize())
                 t_scale = torch.full_like(abs_diff, fill_value=scale)
-                self.assertTrue(torch.allclose(abs_diff, t_scale, atol=1e-5))
+                torch.testing.assert_close(abs_diff, t_scale, rtol=1e-5, atol=1e-5)
 
         x = torch.randint(50, 100, size=(2, 3, 10, 10)).to(dtype)
         qx = torch.quantize_per_tensor(x, scale=1, zero_point=0, dtype=torch.qint8)
@@ -555,7 +557,7 @@ def test_nms_cuda_float16(self):
         iou_thres = 0.2
         keep32 = ops.nms(boxes, scores, iou_thres)
         keep16 = ops.nms(boxes.to(torch.float16), scores.to(torch.float16), iou_thres)
-        assert torch.all(torch.eq(keep32, keep16))
+        assert_equal(keep32, keep16)
 
     @cpu_only
     def test_batched_nms_implementations(self):
@@ -573,12 +575,13 @@ def test_batched_nms_implementations(self):
 
         keep_vanilla = ops.boxes._batched_nms_vanilla(boxes, scores, idxs, iou_threshold)
         keep_trick = ops.boxes._batched_nms_coordinate_trick(boxes, scores, idxs, iou_threshold)
 
-        err_msg = "The vanilla and the trick implementation yield different nms outputs."
-        assert torch.allclose(keep_vanilla, keep_trick), err_msg
+        torch.testing.assert_close(
+            keep_vanilla, keep_trick, msg="The vanilla and the trick implementation yield different nms outputs."
+        )
 
         # Also make sure an empty tensor is returned if boxes is empty
         empty = torch.empty((0,), dtype=torch.int64)
-        assert torch.allclose(empty, ops.batched_nms(empty, None, None, None))
+        torch.testing.assert_close(empty, ops.batched_nms(empty, None, None, None))
 
 
 class DeformConvTester(OpTester, unittest.TestCase):
@@ -690,15 +693,17 @@ def _test_forward_with_batchsize(self, device, contiguous, batch_sz, dtype):
         bias = layer.bias.data
         expected = self.expected_fn(x, weight, offset, mask, bias, stride=stride, padding=padding, dilation=dilation)
 
-        self.assertTrue(torch.allclose(res.to(expected.dtype), expected, rtol=tol, atol=tol),
-                        '\nres:\n{}\nexpected:\n{}'.format(res, expected))
+        torch.testing.assert_close(
+            res.to(expected), expected, rtol=tol, atol=tol, msg='\nres:\n{}\nexpected:\n{}'.format(res, expected)
+        )
 
         # no modulation test
         res = layer(x, offset)
         expected = self.expected_fn(x, weight, offset, None, bias, stride=stride, padding=padding, dilation=dilation)
 
-        self.assertTrue(torch.allclose(res.to(expected.dtype), expected, rtol=tol, atol=tol),
-                        '\nres:\n{}\nexpected:\n{}'.format(res, expected))
+        torch.testing.assert_close(
+            res.to(expected), expected, rtol=tol, atol=tol, msg='\nres:\n{}\nexpected:\n{}'.format(res, expected)
+        )
 
         # test for wrong sizes
         with self.assertRaises(RuntimeError):
@@ -778,7 +783,7 @@ def test_compare_cpu_cuda_grads(self):
                 else:
                     self.assertTrue(init_weight.grad is not None)
                     res_grads = init_weight.grad.to("cpu")
-                    self.assertTrue(true_cpu_grads.allclose(res_grads))
+                    torch.testing.assert_close(true_cpu_grads, res_grads)
 
     @unittest.skipIf(not torch.cuda.is_available(), "CUDA unavailable")
     def test_autocast(self):
@@ -812,14 +817,14 @@ def test_frozenbatchnorm2d_eps(self):
         bn = torch.nn.BatchNorm2d(sample_size[1]).eval()
         bn.load_state_dict(state_dict)
         # Difference is expected to fall in an acceptable range
-        self.assertTrue(torch.allclose(fbn(x), bn(x), atol=1e-6))
+        torch.testing.assert_close(fbn(x), bn(x), rtol=1e-5, atol=1e-6)
 
         # Check computation for eps > 0
         fbn = ops.misc.FrozenBatchNorm2d(sample_size[1], eps=1e-5)
         fbn.load_state_dict(state_dict, strict=False)
         bn = torch.nn.BatchNorm2d(sample_size[1], eps=1e-5).eval()
         bn.load_state_dict(state_dict)
-        self.assertTrue(torch.allclose(fbn(x), bn(x), atol=1e-6))
+        torch.testing.assert_close(fbn(x), bn(x), rtol=1e-5, atol=1e-6)
 
     def test_frozenbatchnorm2d_n_arg(self):
         """Ensure a warning is thrown when passing `n` kwarg
@@ -860,20 +865,11 @@ def test_bbox_same(self):
         exp_xyxy = torch.tensor([[0, 0, 100, 100], [0, 0, 0, 0],
                                  [10, 15, 30, 35], [23, 35, 93, 95]], dtype=torch.float)
 
-        box_same = ops.box_convert(box_tensor, in_fmt="xyxy", out_fmt="xyxy")
-        self.assertEqual(exp_xyxy.size(), torch.Size([4, 4]))
-        self.assertEqual(exp_xyxy.dtype, box_tensor.dtype)
-        assert torch.all(torch.eq(box_same, exp_xyxy)).item()
+        assert_equal(ops.box_convert(box_tensor, in_fmt="xyxy", out_fmt="xyxy"), exp_xyxy)
 
-        box_same = ops.box_convert(box_tensor, in_fmt="xywh", out_fmt="xywh")
-        self.assertEqual(exp_xyxy.size(), torch.Size([4, 4]))
-        self.assertEqual(exp_xyxy.dtype, box_tensor.dtype)
-        assert torch.all(torch.eq(box_same, exp_xyxy)).item()
+        assert_equal(ops.box_convert(box_tensor, in_fmt="xywh", out_fmt="xywh"), exp_xyxy)
 
-        box_same = ops.box_convert(box_tensor, in_fmt="cxcywh", out_fmt="cxcywh")
-        self.assertEqual(exp_xyxy.size(), torch.Size([4, 4]))
-        self.assertEqual(exp_xyxy.dtype, box_tensor.dtype)
-        assert torch.all(torch.eq(box_same, exp_xyxy)).item()
+        assert_equal(ops.box_convert(box_tensor, in_fmt="cxcywh", out_fmt="cxcywh"), exp_xyxy)
 
     def test_bbox_xyxy_xywh(self):
         # Simple test convert boxes to xywh and back. Make sure they are same.
@@ -884,15 +880,11 @@ def test_bbox_xyxy_xywh(self):
                                  [10, 15, 20, 20], [23, 35, 70, 60]], dtype=torch.float)
 
         box_xywh = ops.box_convert(box_tensor, in_fmt="xyxy", out_fmt="xywh")
-        self.assertEqual(exp_xywh.size(), torch.Size([4, 4]))
-        self.assertEqual(exp_xywh.dtype, box_tensor.dtype)
-        assert torch.all(torch.eq(box_xywh, exp_xywh)).item()
+        assert_equal(box_xywh, exp_xywh)
 
         # Reverse conversion
         box_xyxy = ops.box_convert(box_xywh, in_fmt="xywh", out_fmt="xyxy")
-        self.assertEqual(box_xyxy.size(), torch.Size([4, 4]))
-        self.assertEqual(box_xyxy.dtype, box_tensor.dtype)
-        assert torch.all(torch.eq(box_xyxy, box_tensor)).item()
+        assert_equal(box_xyxy, box_tensor)
 
     def test_bbox_xyxy_cxcywh(self):
         # Simple test convert boxes to xywh and back. Make sure they are same.
@@ -903,15 +895,11 @@ def test_bbox_xyxy_cxcywh(self):
                                    [20, 25, 20, 20], [58, 65, 70, 60]], dtype=torch.float)
 
         box_cxcywh = ops.box_convert(box_tensor, in_fmt="xyxy", out_fmt="cxcywh")
-        self.assertEqual(exp_cxcywh.size(), torch.Size([4, 4]))
-        self.assertEqual(exp_cxcywh.dtype, box_tensor.dtype)
-        assert torch.all(torch.eq(box_cxcywh, exp_cxcywh)).item()
+        assert_equal(box_cxcywh, exp_cxcywh)
 
         # Reverse conversion
         box_xyxy = ops.box_convert(box_cxcywh, in_fmt="cxcywh", out_fmt="xyxy")
-        self.assertEqual(box_xyxy.size(), torch.Size([4, 4]))
-        self.assertEqual(box_xyxy.dtype, box_tensor.dtype)
-        assert torch.all(torch.eq(box_xyxy, box_tensor)).item()
+        assert_equal(box_xyxy, box_tensor)
 
     def test_bbox_xywh_cxcywh(self):
         box_tensor = torch.tensor([[0, 0, 100, 100], [0, 0, 0, 0],
@@ -922,15 +910,11 @@ def test_bbox_xywh_cxcywh(self):
                                    [20, 25, 20, 20], [58, 65, 70, 60]], dtype=torch.float)
 
         box_cxcywh = ops.box_convert(box_tensor, in_fmt="xywh", out_fmt="cxcywh")
-        self.assertEqual(exp_cxcywh.size(), torch.Size([4, 4]))
-        self.assertEqual(exp_cxcywh.dtype, box_tensor.dtype)
-        assert torch.all(torch.eq(box_cxcywh, exp_cxcywh)).item()
+        assert_equal(box_cxcywh, exp_cxcywh)
 
         # Reverse conversion
         box_xywh = ops.box_convert(box_cxcywh, in_fmt="cxcywh", out_fmt="xywh")
-        self.assertEqual(box_xywh.size(), torch.Size([4, 4]))
-        self.assertEqual(box_xywh.dtype, box_tensor.dtype)
-        assert torch.all(torch.eq(box_xywh, box_tensor)).item()
+        assert_equal(box_xywh, box_tensor)
 
     def test_bbox_invalid(self):
         box_tensor = torch.tensor([[0, 0, 100, 100], [0, 0, 0, 0],
@@ -951,19 +935,18 @@ def test_bbox_convert_jit(self):
 
         box_xywh = ops.box_convert(box_tensor, in_fmt="xyxy", out_fmt="xywh")
         scripted_xywh = scripted_fn(box_tensor, 'xyxy', 'xywh')
-        self.assertTrue((scripted_xywh - box_xywh).abs().max() < TOLERANCE)
+        torch.testing.assert_close(scripted_xywh, box_xywh, rtol=0.0, atol=TOLERANCE)
 
         box_cxcywh = ops.box_convert(box_tensor, in_fmt="xyxy", out_fmt="cxcywh")
         scripted_cxcywh = scripted_fn(box_tensor, 'xyxy', 'cxcywh')
-        self.assertTrue((scripted_cxcywh - box_cxcywh).abs().max() < TOLERANCE)
+        torch.testing.assert_close(scripted_cxcywh, box_cxcywh, rtol=0.0, atol=TOLERANCE)
 
 
 class BoxAreaTester(unittest.TestCase):
     def test_box_area(self):
         def area_check(box, expected, tolerance=1e-4):
             out = ops.box_area(box)
-            assert out.size() == expected.size()
-            assert ((out - expected).abs().max() < tolerance).item()
+            torch.testing.assert_close(out, expected, rtol=0.0, atol=tolerance)
 
         # Check for int boxes
         for dtype in [torch.int8, torch.int16, torch.int32, torch.int64]:
@@ -991,8 +974,7 @@ class BoxIouTester(unittest.TestCase):
    def test_iou(self):
         def iou_check(box, expected, tolerance=1e-4):
             out = ops.box_iou(box, box)
-            assert out.size() == expected.size()
-            assert ((out - expected).abs().max() < tolerance).item()
+            torch.testing.assert_close(out, expected, rtol=0.0, atol=tolerance)
 
         # Check for int boxes
         for dtype in [torch.int16, torch.int32, torch.int64]:
@@ -1013,8 +995,7 @@ class GenBoxIouTester(unittest.TestCase):
     def test_gen_iou(self):
         def gen_iou_check(box, expected, tolerance=1e-4):
             out = ops.generalized_box_iou(box, box)
-            assert out.size() == expected.size()
-            assert ((out - expected).abs().max() < tolerance).item()
+            torch.testing.assert_close(out, expected, rtol=0.0, atol=tolerance)
 
         # Check for int boxes
         for dtype in [torch.int16, torch.int32, torch.int64]:
diff --git a/test/test_transforms.py b/test/test_transforms.py
index 9402a37bc35..da8813a20f9 100644
--- a/test/test_transforms.py
+++ b/test/test_transforms.py
@@ -22,6 +22,7 @@
     stats = None
 
 from common_utils import cycle_over, int_dtypes, float_dtypes
+from _assert_utils import assert_equal
 
 
 GRACE_HOPPER = get_file_path_2(
@@ -102,8 +103,9 @@ def test_center_crop_2(self):
                              "image_size: {} crop_size: {}".format(input_image_size, crop_size))
 
             # Ensure output for PIL and Tensor are equal
-            self.assertEqual((output_tensor - output_pil).sum(), 0,
-                             "image_size: {} crop_size: {}".format(input_image_size, crop_size))
+            assert_equal(
+                output_tensor, output_pil, msg="image_size: {} crop_size: {}".format(input_image_size, crop_size)
+            )
 
             # Check if content in center of both image and cropped output is same.
             center_size = (min(crop_size[0], input_image_size[0]), min(crop_size[1], input_image_size[1]))
@@ -126,8 +128,9 @@ def test_center_crop_2(self):
                 input_center_tl[1]:input_center_tl[1] + center_size[1]
             ]
 
-            self.assertEqual((output_center - img_center).sum(), 0,
-                             "image_size: {} crop_size: {}".format(input_image_size, crop_size))
+            assert_equal(
+                output_center, img_center, msg="image_size: {} crop_size: {}".format(input_image_size, crop_size)
+            )
 
     def test_five_crop(self):
         to_pil_image = transforms.ToPILImage()
@@ -382,7 +385,7 @@ def test_random_crop(self):
         ])(img)
         self.assertEqual(result.size(1), height)
         self.assertEqual(result.size(2), width)
-        self.assertTrue(np.allclose(img.numpy(), result.numpy()))
+        torch.testing.assert_close(result, img)
 
         result = transforms.Compose([
             transforms.ToPILImage(),
@@ -414,8 +417,8 @@ def test_pad(self):
        # to the pad value
         fill_v = fill / 255
         eps = 1e-5
-        self.assertTrue((result[:, :padding, :] - fill_v).abs().max() < eps)
-        self.assertTrue((result[:, :, :padding] - fill_v).abs().max() < eps)
+        torch.testing.assert_close(result[:, :padding, :], fill_v, rtol=0.0, atol=eps)
+        torch.testing.assert_close(result[:, :, :padding], fill_v, rtol=0.0, atol=eps)
         self.assertRaises(ValueError, transforms.Pad(padding, fill=(1, 2)),
                           transforms.ToPILImage()(img))
 
@@ -448,7 +451,7 @@ def test_pad_with_non_constant_padding_modes(self):
         # First 6 elements of leftmost edge in the middle of the image, values are in order:
         # edge_pad, edge_pad, edge_pad, constant_pad, constant value added to leftmost edge, 0
         edge_middle_slice = np.asarray(edge_padded_img).transpose(2, 0, 1)[0][17][:6]
-        self.assertTrue(np.all(edge_middle_slice == np.asarray([200, 200, 200, 200, 1, 0])))
+        assert_equal(edge_middle_slice, np.asarray([200, 200, 200, 200, 1, 0]))
         self.assertEqual(transforms.ToTensor()(edge_padded_img).size(), (3, 35, 35))
 
         # Pad 3 to left/right, 2 to top/bottom
@@ -456,7 +459,7 @@ def test_pad_with_non_constant_padding_modes(self):
         # First 6 elements of leftmost edge in the middle of the image, values are in order:
         # reflect_pad, reflect_pad, reflect_pad, constant_pad, constant value added to leftmost edge, 0
         reflect_middle_slice = np.asarray(reflect_padded_img).transpose(2, 0, 1)[0][17][:6]
-        self.assertTrue(np.all(reflect_middle_slice == np.asarray([0, 0, 1, 200, 1, 0])))
+        assert_equal(reflect_middle_slice, np.asarray([0, 0, 1, 200, 1, 0]))
         self.assertEqual(transforms.ToTensor()(reflect_padded_img).size(), (3, 33, 35))
 
         # Pad 3 to left, 2 to top, 2 to right, 1 to bottom
@@ -464,7 +467,7 @@ def test_pad_with_non_constant_padding_modes(self):
         # First 6 elements of leftmost edge in the middle of the image, values are in order:
         # sym_pad, sym_pad, sym_pad, constant_pad, constant value added to leftmost edge, 0
         symmetric_middle_slice = np.asarray(symmetric_padded_img).transpose(2, 0, 1)[0][17][:6]
-        self.assertTrue(np.all(symmetric_middle_slice == np.asarray([0, 1, 200, 200, 1, 0])))
+        assert_equal(symmetric_middle_slice, np.asarray([0, 1, 200, 200, 1, 0]))
         self.assertEqual(transforms.ToTensor()(symmetric_padded_img).size(), (3, 32, 34))
 
         # Check negative padding explicitly for symmetric case, since it is not
@@ -473,8 +476,8 @@ def test_pad_with_non_constant_padding_modes(self):
         symmetric_padded_img_neg = F.pad(img, (-1, 2, 3, -3), padding_mode='symmetric')
         symmetric_neg_middle_left = np.asarray(symmetric_padded_img_neg).transpose(2, 0, 1)[0][17][:3]
         symmetric_neg_middle_right = np.asarray(symmetric_padded_img_neg).transpose(2, 0, 1)[0][17][-4:]
-        self.assertTrue(np.all(symmetric_neg_middle_left == np.asarray([1, 0, 0])))
-        self.assertTrue(np.all(symmetric_neg_middle_right == np.asarray([200, 200, 0, 0])))
+        assert_equal(symmetric_neg_middle_left, np.asarray([1, 0, 0]))
+        assert_equal(symmetric_neg_middle_right, np.asarray([200, 200, 0, 0]))
         self.assertEqual(transforms.ToTensor()(symmetric_padded_img_neg).size(), (3, 28, 31))
 
     def test_pad_raises_with_invalid_pad_sequence_len(self):
@@ -499,12 +502,12 @@ def test_lambda(self):
         trans = transforms.Lambda(lambda x: x.add(10))
         x = torch.randn(10)
         y = trans(x)
-        self.assertTrue(y.equal(torch.add(x, 10)))
+        assert_equal(y, torch.add(x, 10))
 
         trans = transforms.Lambda(lambda x: x.add_(10))
         x = torch.randn(10)
         y = trans(x)
-        self.assertTrue(y.equal(x))
+        assert_equal(y, x)
 
         # Checking if Lambda can be printed as string
         trans.__repr__()
@@ -613,23 +616,23 @@ def test_to_tensor(self):
             input_data = torch.ByteTensor(channels, height, width).random_(0, 255).float().div_(255)
             img = transforms.ToPILImage()(input_data)
             output = trans(img)
-            self.assertTrue(np.allclose(input_data.numpy(), output.numpy()))
+            torch.testing.assert_close(output, input_data)
 
             ndarray = np.random.randint(low=0, high=255, size=(height, width, channels)).astype(np.uint8)
             output = trans(ndarray)
             expected_output = ndarray.transpose((2, 0, 1)) / 255.0
-            self.assertTrue(np.allclose(output.numpy(), expected_output))
+            torch.testing.assert_close(output, expected_output)
 
             ndarray = np.random.rand(height, width, channels).astype(np.float32)
             output = trans(ndarray)
             expected_output = ndarray.transpose((2, 0, 1))
-            self.assertTrue(np.allclose(output.numpy(), expected_output))
+            torch.testing.assert_close(output, expected_output)
 
         # separate test for mode '1' PIL images
         input_data = torch.ByteTensor(1, height, width).bernoulli_()
         img = transforms.ToPILImage()(input_data.mul(255)).convert('1')
         output = trans(img)
-        self.assertTrue(np.allclose(input_data.numpy(), output.numpy()))
+        torch.testing.assert_close(input_data, output)
 
    def test_to_tensor_with_other_default_dtypes(self):
         current_def_dtype = torch.get_default_dtype()
@@ -665,8 +668,7 @@ def test_convert_image_dtype_float_to_float(self):
                 output_image = transform(input_image)
                 output_image_script = transform_script(input_image, output_dtype)
 
-                script_diff = output_image_script - output_image
-                self.assertLess(script_diff.abs().max(), 1e-6)
+                torch.testing.assert_close(output_image_script, output_image, rtol=0.0, atol=1e-6)
 
                 actual_min, actual_max = output_image.tolist()
                 desired_min, desired_max = 0.0, 1.0
@@ -691,8 +693,7 @@ def test_convert_image_dtype_float_to_int(self):
                     output_image = transform(input_image)
                     output_image_script = transform_script(input_image, output_dtype)
 
-                    script_diff = output_image_script - output_image
-                    self.assertLess(script_diff.abs().max(), 1e-6)
+                    torch.testing.assert_close(output_image_script, output_image, rtol=0.0, atol=1e-6)
 
                     actual_min, actual_max = output_image.tolist()
                     desired_min, desired_max = 0, torch.iinfo(output_dtype).max
@@ -711,8 +712,7 @@ def test_convert_image_dtype_int_to_float(self):
                 output_image = transform(input_image)
                 output_image_script = transform_script(input_image, output_dtype)
 
-                script_diff = output_image_script - output_image
-                self.assertLess(script_diff.abs().max(), 1e-6)
+                torch.testing.assert_close(output_image_script, output_image, rtol=0.0, atol=1e-6)
 
                 actual_min, actual_max = output_image.tolist()
                 desired_min, desired_max = 0.0, 1.0
@@ -736,9 +736,12 @@ def test_convert_image_dtype_int_to_int(self):
                 output_image = transform(input_image)
                 output_image_script = transform_script(input_image, output_dtype)
 
-                script_diff = output_image_script.float() - output_image.float()
-                self.assertLess(
-                    script_diff.abs().max(), 1e-6, msg="{} vs {}".format(output_image_script, output_image)
+                torch.testing.assert_close(
+                    output_image_script,
+                    output_image,
+                    rtol=0.0,
+                    atol=1e-6,
+                    msg="{} vs {}".format(output_image_script, output_image),
                 )
 
                 actual_min, actual_max = output_image.tolist()
@@ -780,8 +783,7 @@ def test_accimage_to_tensor(self):
         expected_output = trans(Image.open(GRACE_HOPPER).convert('RGB'))
         output = trans(accimage.Image(GRACE_HOPPER))
 
-        self.assertEqual(expected_output.size(), output.size())
-        self.assertTrue(np.allclose(output.numpy(), expected_output.numpy()))
+        torch.testing.assert_close(output, expected_output)
 
     def test_pil_to_tensor(self):
         test_channels = [1, 3, 4]
@@ -796,25 +798,25 @@ def test_pil_to_tensor(self):
             input_data = torch.ByteTensor(channels, height, width).random_(0, 255)
             img = transforms.ToPILImage()(input_data)
             output = trans(img)
-            self.assertTrue(np.allclose(input_data.numpy(), output.numpy()))
+            torch.testing.assert_close(input_data, output)
 
             input_data = np.random.randint(low=0, high=255, size=(height, width, channels)).astype(np.uint8)
             img = transforms.ToPILImage()(input_data)
             output = trans(img)
             expected_output = input_data.transpose((2, 0, 1))
-            self.assertTrue(np.allclose(output.numpy(), expected_output))
+            torch.testing.assert_close(output.numpy(), expected_output)
 
             input_data = torch.as_tensor(np.random.rand(channels, height, width).astype(np.float32))
             img = transforms.ToPILImage()(input_data)  # CHW -> HWC and (* 255).byte()
             output = trans(img)  # HWC -> CHW
             expected_output = (input_data * 255).byte()
-            self.assertTrue(np.allclose(output.numpy(), expected_output.numpy()))
+            torch.testing.assert_close(output, expected_output)
 
         # separate test for mode '1' PIL images
         input_data = torch.ByteTensor(1, height, width).bernoulli_()
         img = transforms.ToPILImage()(input_data.mul(255)).convert('1')
         output = trans(img)
-        self.assertTrue(np.allclose(input_data.numpy(), output.numpy()))
+        torch.testing.assert_close(input_data, output)
 
     @unittest.skipIf(accimage is None, 'accimage not available')
     def test_accimage_pil_to_tensor(self):
@@ -824,7 +826,7 @@ def test_accimage_pil_to_tensor(self):
         output = trans(accimage.Image(GRACE_HOPPER))
 
         self.assertEqual(expected_output.size(), output.size())
-        self.assertTrue(np.allclose(output.numpy(), expected_output.numpy()))
+        torch.testing.assert_close(output, expected_output)
 
     @unittest.skipIf(accimage is None, 'accimage not available')
     def test_accimage_resize(self):
@@ -859,7 +861,7 @@ def test_accimage_crop(self):
         output = trans(accimage.Image(GRACE_HOPPER))
 
         self.assertEqual(expected_output.size(), output.size())
-        self.assertTrue(np.allclose(output.numpy(), expected_output.numpy()))
+        torch.testing.assert_close(output, expected_output)
 
     def test_1_channel_tensor_to_pil_image(self):
         to_tensor = transforms.ToTensor()
@@ -880,12 +882,13 @@ def test_1_channel_tensor_to_pil_image(self):
             for transform in [transforms.ToPILImage(), transforms.ToPILImage(mode=mode)]:
                 img = transform(img_data)
                 self.assertEqual(img.mode, mode)
-                self.assertTrue(np.allclose(expected_output, to_tensor(img).numpy()))
+                torch.testing.assert_close(expected_output, to_tensor(img).numpy())
         # 'F' mode for torch.FloatTensor
         img_F_mode = transforms.ToPILImage(mode='F')(img_data_float)
         self.assertEqual(img_F_mode.mode, 'F')
-        self.assertTrue(np.allclose(np.array(Image.fromarray(img_data_float.squeeze(0).numpy(), mode='F')),
-                                    np.array(img_F_mode)))
+        torch.testing.assert_close(
+            np.array(Image.fromarray(img_data_float.squeeze(0).numpy(), mode='F')), np.array(img_F_mode)
+        )
 
     def test_1_channel_ndarray_to_pil_image(self):
         img_data_float = torch.Tensor(4, 4, 1).uniform_().numpy()
@@ -899,7 +902,7 @@ def test_1_channel_ndarray_to_pil_image(self):
             for transform in [transforms.ToPILImage(), transforms.ToPILImage(mode=mode)]:
                 img = transform(img_data)
                 self.assertEqual(img.mode, mode)
-                self.assertTrue(np.allclose(img_data[:, :, 0], img))
+                torch.testing.assert_close(img_data[:, :, 0], np.asarray(img).astype(img_data.dtype))
 
     def test_2_channel_ndarray_to_pil_image(self):
         def verify_img_data(img_data, mode):
@@ -911,7 +914,7 @@ def verify_img_data(img_data, mode):
                 self.assertEqual(img.mode, mode)
             split = img.split()
             for i in range(2):
-                self.assertTrue(np.allclose(img_data[:, :, i], split[i]))
+                torch.testing.assert_close(img_data[:, :, i], np.asarray(split[i]))
 
         img_data = torch.ByteTensor(4, 4, 2).random_(0, 255).numpy()
         for mode in [None, 'LA']:
@@ -984,7 +987,7 @@ def verify_img_data(img_data, mode):
                 self.assertEqual(img.mode, mode)
             split = img.split()
             for i in range(3):
-                self.assertTrue(np.allclose(img_data[:, :, i], split[i]))
+                torch.testing.assert_close(img_data[:, :, i], np.asarray(split[i]))
 
         img_data = torch.ByteTensor(4, 4, 3).random_(0, 255).numpy()
         for mode in [None, 'RGB', 'HSV', 'YCbCr']:
@@ -1033,7 +1036,7 @@ def verify_img_data(img_data, mode):
                 self.assertEqual(img.mode, mode)
             split = img.split()
             for i in range(4):
-                self.assertTrue(np.allclose(img_data[:, :, i], split[i]))
+                torch.testing.assert_close(img_data[:, :, i], np.asarray(split[i]))
 
         img_data = torch.ByteTensor(4, 4, 4).random_(0, 255).numpy()
         for mode in [None, 'RGBA', 'CMYK', 'RGBX']:
@@ -1064,7 +1067,7 @@ def test_2d_tensor_to_pil_image(self):
             for transform in [transforms.ToPILImage(), transforms.ToPILImage(mode=mode)]:
                 img = transform(img_data)
self.assertEqual(img.mode, mode) - self.assertTrue(np.allclose(expected_output, to_tensor(img).numpy())) + torch.testing.assert_close(expected_output, to_tensor(img).numpy()) def test_2d_ndarray_to_pil_image(self): img_data_float = torch.Tensor(4, 4).uniform_().numpy() @@ -1078,7 +1081,7 @@ def test_2d_ndarray_to_pil_image(self): for transform in [transforms.ToPILImage(), transforms.ToPILImage(mode=mode)]: img = transform(img_data) self.assertEqual(img.mode, mode) - self.assertTrue(np.allclose(img_data, img)) + torch.testing.assert_close(img_data, img) def test_tensor_bad_types_to_pil_image(self): with self.assertRaisesRegex(ValueError, r'pic should be 2/3 dimensional. Got \d+ dimensions.'): @@ -1189,7 +1192,7 @@ def samples_from_standard_normal(tensor): # Checking the optional in-place behaviour tensor = torch.rand((1, 16, 16)) tensor_inplace = transforms.Normalize((0.5,), (0.5,), inplace=True)(tensor) - self.assertTrue(torch.equal(tensor, tensor_inplace)) + assert_equal(tensor, tensor_inplace) def test_normalize_different_dtype(self): for dtype1 in [torch.float32, torch.float64]: @@ -1215,8 +1218,8 @@ def test_normalize_3d_tensor(self): result2 = F.normalize(img, mean_unsqueezed.repeat(1, img_size, img_size), std_unsqueezed.repeat(1, img_size, img_size)) - assert_array_almost_equal(target, result1.numpy()) - assert_array_almost_equal(target, result2.numpy()) + torch.testing.assert_close(target, result1.numpy()) + torch.testing.assert_close(target, result2.numpy()) def test_adjust_brightness(self): x_shape = [2, 2, 3] @@ -1227,21 +1230,21 @@ def test_adjust_brightness(self): # test 0 y_pil = F.adjust_brightness(x_pil, 1) y_np = np.array(y_pil) - self.assertTrue(np.allclose(y_np, x_np)) + torch.testing.assert_close(y_np, x_np) # test 1 y_pil = F.adjust_brightness(x_pil, 0.5) y_np = np.array(y_pil) y_ans = [0, 2, 6, 27, 67, 113, 18, 4, 117, 45, 127, 0] y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape) - self.assertTrue(np.allclose(y_np, y_ans)) + torch.testing.assert_close(y_np, y_ans) # test 2 y_pil = F.adjust_brightness(x_pil, 2) y_np = np.array(y_pil) y_ans = [0, 10, 26, 108, 255, 255, 74, 16, 255, 180, 255, 2] y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape) - self.assertTrue(np.allclose(y_np, y_ans)) + torch.testing.assert_close(y_np, y_ans) def test_adjust_contrast(self): x_shape = [2, 2, 3] @@ -1252,21 +1255,21 @@ def test_adjust_contrast(self): # test 0 y_pil = F.adjust_contrast(x_pil, 1) y_np = np.array(y_pil) - self.assertTrue(np.allclose(y_np, x_np)) + torch.testing.assert_close(y_np, x_np) # test 1 y_pil = F.adjust_contrast(x_pil, 0.5) y_np = np.array(y_pil) y_ans = [43, 45, 49, 70, 110, 156, 61, 47, 160, 88, 170, 43] y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape) - self.assertTrue(np.allclose(y_np, y_ans)) + torch.testing.assert_close(y_np, y_ans) # test 2 y_pil = F.adjust_contrast(x_pil, 2) y_np = np.array(y_pil) y_ans = [0, 0, 0, 22, 184, 255, 0, 0, 255, 94, 255, 0] y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape) - self.assertTrue(np.allclose(y_np, y_ans)) + torch.testing.assert_close(y_np, y_ans) @unittest.skipIf(Image.__version__ >= '7', "Temporarily disabled") def test_adjust_saturation(self): @@ -1278,21 +1281,21 @@ def test_adjust_saturation(self): # test 0 y_pil = F.adjust_saturation(x_pil, 1) y_np = np.array(y_pil) - self.assertTrue(np.allclose(y_np, x_np)) + torch.testing.assert_close(y_np, x_np) # test 1 y_pil = F.adjust_saturation(x_pil, 0.5) y_np = np.array(y_pil) y_ans = [2, 4, 8, 87, 128, 173, 39, 25, 138, 133, 215, 88] y_ans = 
np.array(y_ans, dtype=np.uint8).reshape(x_shape) - self.assertTrue(np.allclose(y_np, y_ans)) + torch.testing.assert_close(y_np, y_ans) # test 2 y_pil = F.adjust_saturation(x_pil, 2) y_np = np.array(y_pil) y_ans = [0, 6, 22, 0, 149, 255, 32, 0, 255, 4, 255, 0] y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape) - self.assertTrue(np.allclose(y_np, y_ans)) + torch.testing.assert_close(y_np, y_ans) def test_adjust_hue(self): x_shape = [2, 2, 3] @@ -1310,21 +1313,21 @@ def test_adjust_hue(self): y_np = np.array(y_pil) y_ans = [0, 5, 13, 54, 139, 226, 35, 8, 234, 91, 255, 1] y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape) - self.assertTrue(np.allclose(y_np, y_ans)) + torch.testing.assert_close(y_np, y_ans) # test 1 y_pil = F.adjust_hue(x_pil, 0.25) y_np = np.array(y_pil) y_ans = [13, 0, 12, 224, 54, 226, 234, 8, 99, 1, 222, 255] y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape) - self.assertTrue(np.allclose(y_np, y_ans)) + torch.testing.assert_close(y_np, y_ans) # test 2 y_pil = F.adjust_hue(x_pil, -0.25) y_np = np.array(y_pil) y_ans = [0, 13, 2, 54, 226, 58, 8, 234, 152, 255, 43, 1] y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape) - self.assertTrue(np.allclose(y_np, y_ans)) + torch.testing.assert_close(y_np, y_ans) def test_adjust_sharpness(self): x_shape = [4, 4, 3] @@ -1337,7 +1340,7 @@ def test_adjust_sharpness(self): # test 0 y_pil = F.adjust_sharpness(x_pil, 1) y_np = np.array(y_pil) - self.assertTrue(np.allclose(y_np, x_np)) + torch.testing.assert_close(y_np, x_np) # test 1 y_pil = F.adjust_sharpness(x_pil, 0.5) @@ -1346,7 +1349,7 @@ def test_adjust_sharpness(self): 30, 74, 103, 96, 114, 97, 110, 100, 101, 114, 32, 81, 103, 108, 102, 101, 107, 116, 105, 115, 0, 0, 73, 32, 108, 111, 118, 101, 32, 121, 111, 117] y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape) - self.assertTrue(np.allclose(y_np, y_ans)) + torch.testing.assert_close(y_np, y_ans) # test 2 y_pil = F.adjust_sharpness(x_pil, 2) @@ -1355,7 +1358,7 @@ def test_adjust_sharpness(self): 0, 46, 118, 111, 132, 97, 110, 100, 101, 114, 32, 95, 135, 146, 126, 112, 119, 116, 105, 115, 0, 0, 73, 32, 108, 111, 118, 101, 32, 121, 111, 117] y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape) - self.assertTrue(np.allclose(y_np, y_ans)) + torch.testing.assert_close(y_np, y_ans) # test 3 x_shape = [2, 2, 3] @@ -1366,7 +1369,7 @@ def test_adjust_sharpness(self): y_pil = F.adjust_sharpness(x_pil, 2) y_np = np.array(y_pil).transpose(2, 0, 1) y_th = F.adjust_sharpness(x_th, 2) - self.assertTrue(np.allclose(y_np, y_th.numpy())) + torch.testing.assert_close(y_np, y_th.numpy()) def test_adjust_gamma(self): x_shape = [2, 2, 3] @@ -1377,21 +1380,21 @@ def test_adjust_gamma(self): # test 0 y_pil = F.adjust_gamma(x_pil, 1) y_np = np.array(y_pil) - self.assertTrue(np.allclose(y_np, x_np)) + torch.testing.assert_close(y_np, x_np) # test 1 y_pil = F.adjust_gamma(x_pil, 0.5) y_np = np.array(y_pil) y_ans = [0, 35, 57, 117, 186, 241, 97, 45, 245, 152, 255, 16] y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape) - self.assertTrue(np.allclose(y_np, y_ans)) + torch.testing.assert_close(y_np, y_ans) # test 2 y_pil = F.adjust_gamma(x_pil, 2) y_np = np.array(y_pil) y_ans = [0, 0, 0, 11, 71, 201, 5, 0, 215, 31, 255, 0] y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape) - self.assertTrue(np.allclose(y_np, y_ans)) + torch.testing.assert_close(y_np, y_ans) def test_adjusts_L_mode(self): x_shape = [2, 2, 3] @@ -1450,10 +1453,8 @@ def test_linear_transformation(self): cov += np.dot(xwhite, xwhite.T) / num_features mean += 
np.sum(xwhite) / num_features # if rtol for std = 1e-3 then rtol for cov = 2e-3 as std**2 = cov - self.assertTrue(np.allclose(cov / num_samples, np.identity(1), rtol=2e-3), - "cov not close to 1") - self.assertTrue(np.allclose(mean / num_samples, 0, rtol=1e-3), - "mean not close to 0") + torch.testing.assert_close(cov / num_samples, np.identity(1), rtol=2e-3, atol=1e-8, msg="cov not close to 1") + torch.testing.assert_close(mean / num_samples, 0, rtol=1e-3, atol=1e-8, msg="mean not close to 0") # Checking if LinearTransformation can be printed as string whitening.__repr__() @@ -1491,7 +1492,7 @@ def test_rotate(self): result_a = F.rotate(img, 90) result_b = F.rotate(img, -270) - self.assertTrue(np.all(np.array(result_a) == np.array(result_b))) + assert_equal(np.array(result_a), np.array(result_b)) def test_rotate_fill(self): img = F.to_pil_image(np.ones((100, 100, 3), dtype=np.uint8) * 255, "RGB") @@ -1732,7 +1733,7 @@ def test_to_grayscale(self): gray_np_1 = np.array(gray_pil_1) self.assertEqual(gray_pil_1.mode, 'L', 'mode should be L') self.assertEqual(gray_np_1.shape, tuple(x_shape[0:2]), 'should be 1 channel') - np.testing.assert_equal(gray_np, gray_np_1) + assert_equal(gray_np, gray_np_1) # Case 2: RGB -> 3 channel grayscale trans2 = transforms.Grayscale(num_output_channels=3) gray_pil_2 = trans2(x_pil) gray_np_2 = np.array(gray_pil_2) self.assertEqual(gray_pil_2.mode, 'RGB', 'mode should be RGB') self.assertEqual(gray_np_2.shape, tuple(x_shape), 'should be 3 channel') - np.testing.assert_equal(gray_np_2[:, :, 0], gray_np_2[:, :, 1]) - np.testing.assert_equal(gray_np_2[:, :, 1], gray_np_2[:, :, 2]) - np.testing.assert_equal(gray_np, gray_np_2[:, :, 0]) + assert_equal(gray_np_2[:, :, 0], gray_np_2[:, :, 1]) + assert_equal(gray_np_2[:, :, 1], gray_np_2[:, :, 2]) + assert_equal(gray_np, gray_np_2[:, :, 0]) # Case 3: 1 channel grayscale -> 1 channel grayscale trans3 = transforms.Grayscale(num_output_channels=1) gray_pil_3 = trans3(x_pil_2) gray_np_3 = np.array(gray_pil_3) self.assertEqual(gray_pil_3.mode, 'L', 'mode should be L') self.assertEqual(gray_np_3.shape, tuple(x_shape[0:2]), 'should be 1 channel') - np.testing.assert_equal(gray_np, gray_np_3) + assert_equal(gray_np, gray_np_3) # Case 4: 1 channel grayscale -> 3 channel grayscale trans4 = transforms.Grayscale(num_output_channels=3) gray_pil_4 = trans4(x_pil_2) gray_np_4 = np.array(gray_pil_4) self.assertEqual(gray_pil_4.mode, 'RGB', 'mode should be RGB') self.assertEqual(gray_np_4.shape, tuple(x_shape), 'should be 3 channel') - np.testing.assert_equal(gray_np_4[:, :, 0], gray_np_4[:, :, 1]) - np.testing.assert_equal(gray_np_4[:, :, 1], gray_np_4[:, :, 2]) - np.testing.assert_equal(gray_np, gray_np_4[:, :, 0]) + assert_equal(gray_np_4[:, :, 0], gray_np_4[:, :, 1]) + assert_equal(gray_np_4[:, :, 1], gray_np_4[:, :, 2]) + assert_equal(gray_np, gray_np_4[:, :, 0]) # Checking if Grayscale can be printed as string trans4.__repr__() @@ -1827,9 +1828,9 @@ def test_random_grayscale(self): gray_np_2 = np.array(gray_pil_2) self.assertEqual(gray_pil_2.mode, 'RGB', 'mode should be RGB') self.assertEqual(gray_np_2.shape, tuple(x_shape), 'should be 3 channel') - np.testing.assert_equal(gray_np_2[:, :, 0], gray_np_2[:, :, 1]) - np.testing.assert_equal(gray_np_2[:, :, 1], gray_np_2[:, :, 2]) - np.testing.assert_equal(gray_np, gray_np_2[:, :, 0]) + assert_equal(gray_np_2[:, :, 0], gray_np_2[:, :, 1]) + assert_equal(gray_np_2[:, :, 1], gray_np_2[:, :, 2]) + assert_equal(gray_np,
gray_np_2[:, :, 0]) # Case 3b: RGB -> 3 channel grayscale (unchanged) trans2 = transforms.RandomGrayscale(p=0.0) @@ -1837,7 +1838,7 @@ def test_random_grayscale(self): gray_np_2 = np.array(gray_pil_2) self.assertEqual(gray_pil_2.mode, 'RGB', 'mode should be RGB') self.assertEqual(gray_np_2.shape, tuple(x_shape), 'should be 3 channel') - np.testing.assert_equal(x_np, gray_np_2) + assert_equal(x_np, gray_np_2) # Case 3c: 1 channel grayscale -> 1 channel grayscale (grayscaled) trans3 = transforms.RandomGrayscale(p=1.0) @@ -1845,7 +1846,7 @@ def test_random_grayscale(self): gray_np_3 = np.array(gray_pil_3) self.assertEqual(gray_pil_3.mode, 'L', 'mode should be L') self.assertEqual(gray_np_3.shape, tuple(x_shape[0:2]), 'should be 1 channel') - np.testing.assert_equal(gray_np, gray_np_3) + assert_equal(gray_np, gray_np_3) # Case 3d: 1 channel grayscale -> 1 channel grayscale (unchanged) trans3 = transforms.RandomGrayscale(p=0.0) @@ -1853,7 +1854,7 @@ def test_random_grayscale(self): gray_np_3 = np.array(gray_pil_3) self.assertEqual(gray_pil_3.mode, 'L', 'mode should be L') self.assertEqual(gray_np_3.shape, tuple(x_shape[0:2]), 'should be 1 channel') - np.testing.assert_equal(gray_np, gray_np_3) + assert_equal(gray_np, gray_np_3) # Checking if RandomGrayscale can be printed as string trans3.__repr__() diff --git a/test/test_transforms_tensor.py b/test/test_transforms_tensor.py index 2c598e90833..0d5e365351d 100644 --- a/test/test_transforms_tensor.py +++ b/test/test_transforms_tensor.py @@ -10,6 +10,7 @@ from typing import Sequence from common_utils import TransformsTester, get_tmp_dir, int_dtypes, float_dtypes +from _assert_utils import assert_equal NEAREST, BILINEAR, BICUBIC = InterpolationMode.NEAREST, InterpolationMode.BILINEAR, InterpolationMode.BICUBIC @@ -38,7 +39,7 @@ def _test_transform_vs_scripted(self, transform, s_transform, tensor, msg=None): out1 = transform(tensor) torch.manual_seed(12) out2 = s_transform(tensor) - self.assertTrue(out1.equal(out2), msg=msg) + assert_equal(out1, out2, msg=msg) def _test_transform_vs_scripted_on_batch(self, transform, s_transform, batch_tensors, msg=None): torch.manual_seed(12) @@ -48,11 +49,11 @@ def _test_transform_vs_scripted_on_batch(self, transform, s_transform, batch_ten img_tensor = batch_tensors[i, ...] 
torch.manual_seed(12) transformed_img = transform(img_tensor) - self.assertTrue(transformed_img.equal(transformed_batch[i, ...]), msg=msg) + assert_equal(transformed_img, transformed_batch[i, ...], msg=msg) torch.manual_seed(12) s_transformed_batch = s_transform(batch_tensors) - self.assertTrue(transformed_batch.equal(s_transformed_batch), msg=msg) + assert_equal(transformed_batch, s_transformed_batch, msg=msg) def _test_class_op(self, method, meth_kwargs=None, test_exact_match=True, **match_kwargs): if meth_kwargs is None: @@ -75,7 +76,7 @@ def _test_class_op(self, method, meth_kwargs=None, test_exact_match=True, **matc torch.manual_seed(12) transformed_tensor_script = scripted_fn(tensor) - self.assertTrue(transformed_tensor.equal(transformed_tensor_script)) + assert_equal(transformed_tensor, transformed_tensor_script) batch_tensors = self._create_data_batch(height=23, width=34, channels=3, num_samples=4, device=self.device) self._test_transform_vs_scripted_on_batch(f, scripted_fn, batch_tensors) @@ -270,8 +271,11 @@ def _test_op_list_output(self, func, method, out_length, fn_kwargs=None, meth_kw self.assertEqual(len(transformed_t_list), len(transformed_t_list_script)) self.assertEqual(len(transformed_t_list_script), out_length) for transformed_tensor, transformed_tensor_script in zip(transformed_t_list, transformed_t_list_script): - self.assertTrue(transformed_tensor.equal(transformed_tensor_script), - msg="{} vs {}".format(transformed_tensor, transformed_tensor_script)) + assert_equal( + transformed_tensor, + transformed_tensor_script, + msg="{} vs {}".format(transformed_tensor, transformed_tensor_script), + ) # test for class interface fn = getattr(T, method)(**meth_kwargs) @@ -289,8 +293,11 @@ def _test_op_list_output(self, func, method, out_length, fn_kwargs=None, meth_kw torch.manual_seed(12) transformed_img_list = fn(img_tensor) for transformed_img, transformed_batch in zip(transformed_img_list, transformed_batch_list): - self.assertTrue(transformed_img.equal(transformed_batch[i, ...]), - msg="{} vs {}".format(transformed_img, transformed_batch[i, ...])) + assert_equal( + transformed_img, + transformed_batch[i, ...], + msg="{} vs {}".format(transformed_img, transformed_batch[i, ...]), + ) with get_tmp_dir() as tmp_dir: scripted_fn.save(os.path.join(tmp_dir, "t_op_list_{}.pt".format(method))) @@ -505,7 +512,7 @@ def test_linear_transformation(self): transformed_batch = fn(batch_tensors) torch.manual_seed(12) s_transformed_batch = scripted_fn(batch_tensors) - self.assertTrue(transformed_batch.equal(s_transformed_batch)) + assert_equal(transformed_batch, s_transformed_batch) with get_tmp_dir() as tmp_dir: scripted_fn.save(os.path.join(tmp_dir, "t_norm.pt")) @@ -525,7 +532,7 @@ def test_compose(self): transformed_tensor = transforms(tensor) torch.manual_seed(12) transformed_tensor_script = scripted_fn(tensor) - self.assertTrue(transformed_tensor.equal(transformed_tensor_script), msg="{}".format(transforms)) + assert_equal(transformed_tensor, transformed_tensor_script, msg="{}".format(transforms)) t = T.Compose([ lambda x: x, @@ -551,7 +558,7 @@ def test_random_apply(self): transformed_tensor = transforms(tensor) torch.manual_seed(12) transformed_tensor_script = scripted_fn(tensor) - self.assertTrue(transformed_tensor.equal(transformed_tensor_script), msg="{}".format(transforms)) + assert_equal(transformed_tensor, transformed_tensor_script, msg="{}".format(transforms)) if torch.device(self.device).type == "cpu": # Can't check this twice, otherwise diff --git 
a/test/test_transforms_video.py b/test/test_transforms_video.py index e0c7ab5260b..942bb010f71 100644 --- a/test/test_transforms_video.py +++ b/test/test_transforms_video.py @@ -4,6 +4,7 @@ import random import numpy as np import warnings +from _assert_utils import assert_equal try: from scipy import stats @@ -120,7 +121,7 @@ def samples_from_standard_normal(tensor): # Checking the optional in-place behaviour tensor = torch.rand((3, 128, 16, 16)) tensor_inplace = transforms.NormalizeVideo((0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True)(tensor) - self.assertTrue(torch.equal(tensor, tensor_inplace)) + assert_equal(tensor, tensor_inplace) transforms.NormalizeVideo((0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True).__repr__() diff --git a/test/test_utils.py b/test/test_utils.py index ee683b27ca4..2bb1a20cc0e 100644 --- a/test/test_utils.py +++ b/test/test_utils.py @@ -9,6 +9,7 @@ from io import BytesIO import torchvision.transforms.functional as F from PIL import Image, __version__ as PILLOW_VERSION, ImageColor +from _assert_utils import assert_equal PILLOW_VERSION = tuple(int(x) for x in PILLOW_VERSION.split('.')) @@ -48,13 +49,13 @@ def test_make_grid_not_inplace(self): t_clone = t.clone() utils.make_grid(t, normalize=False) - self.assertTrue(torch.equal(t, t_clone), 'make_grid modified tensor in-place') + assert_equal(t, t_clone, msg='make_grid modified tensor in-place') utils.make_grid(t, normalize=True, scale_each=False) - self.assertTrue(torch.equal(t, t_clone), 'make_grid modified tensor in-place') + assert_equal(t, t_clone, msg='make_grid modified tensor in-place') utils.make_grid(t, normalize=True, scale_each=True) - self.assertTrue(torch.equal(t, t_clone), 'make_grid modified tensor in-place') + assert_equal(t, t_clone, msg='make_grid modified tensor in-place') def test_normalize_in_make_grid(self): t = torch.rand(5, 3, 10, 10) * 255 @@ -70,8 +71,8 @@ def test_normalize_in_make_grid(self): rounded_grid_max = torch.round(grid_max * 10 ** n_digits) / (10 ** n_digits) rounded_grid_min = torch.round(grid_min * 10 ** n_digits) / (10 ** n_digits) - self.assertTrue(torch.equal(norm_max, rounded_grid_max), 'Normalized max is not equal to 1') - self.assertTrue(torch.equal(norm_min, rounded_grid_min), 'Normalized min is not equal to 0') + assert_equal(norm_max, rounded_grid_max, msg='Normalized max is not equal to 1') + assert_equal(norm_min, rounded_grid_min, msg='Normalized min is not equal to 0') @unittest.skipIf(sys.platform in ('win32', 'cygwin'), 'temporarily disabled on Windows') def test_save_image(self): @@ -96,8 +97,7 @@ def test_save_image_file_object(self): fp = BytesIO() utils.save_image(t, fp, format='png') img_bytes = Image.open(fp) - self.assertTrue(torch.equal(F.to_tensor(img_orig), F.to_tensor(img_bytes)), - 'Image not stored in file object') + assert_equal(F.to_tensor(img_orig), F.to_tensor(img_bytes), msg='Image not stored in file object') @unittest.skipIf(sys.platform in ('win32', 'cygwin'), 'temporarily disabled on Windows') def test_save_image_single_pixel_file_object(self): @@ -108,8 +108,7 @@ def test_save_image_single_pixel_file_object(self): fp = BytesIO() utils.save_image(t, fp, format='png') img_bytes = Image.open(fp) - self.assertTrue(torch.equal(F.to_tensor(img_orig), F.to_tensor(img_bytes)), - 'Pixel Image not stored in file object') + assert_equal(F.to_tensor(img_orig), F.to_tensor(img_bytes), msg='Image not stored in file object') def test_draw_boxes(self): img = torch.full((3, 100, 100), 255, dtype=torch.uint8) @@ -127,11 +126,11 @@ def test_draw_boxes(self): if 
PILLOW_VERSION >= (8, 2): # The reference image is only valid for new PIL versions expected = torch.as_tensor(np.array(Image.open(path))).permute(2, 0, 1) - self.assertTrue(torch.equal(result, expected)) + assert_equal(result, expected) # Check if modification is not in place - self.assertTrue(torch.all(torch.eq(boxes, boxes_cp)).item()) - self.assertTrue(torch.all(torch.eq(img, img_cp)).item()) + assert_equal(boxes, boxes_cp) + assert_equal(img, img_cp) def test_draw_boxes_vanilla(self): img = torch.full((3, 100, 100), 0, dtype=torch.uint8) @@ -145,10 +144,10 @@ def test_draw_boxes_vanilla(self): res.save(path) expected = torch.as_tensor(np.array(Image.open(path))).permute(2, 0, 1) - self.assertTrue(torch.equal(result, expected)) + assert_equal(result, expected) # Check if modification is not in place - self.assertTrue(torch.all(torch.eq(boxes, boxes_cp)).item()) - self.assertTrue(torch.all(torch.eq(img, img_cp)).item()) + assert_equal(boxes, boxes_cp) + assert_equal(img, img_cp) def test_draw_invalid_boxes(self): img_tp = ((1, 1, 1), (1, 2, 3)) @@ -187,7 +186,7 @@ def test_draw_segmentation_masks(colors, alpha): # Make sure the image didn't change where there's no mask masked_pixels = masks[0] | masks[1] - assert (img[:, ~masked_pixels] == out[:, ~masked_pixels]).all() + assert_equal(img[:, ~masked_pixels], out[:, ~masked_pixels]) if colors is None: colors = utils._generate_color_palette(num_masks) @@ -199,13 +198,12 @@ def test_draw_segmentation_masks(colors, alpha): color = torch.tensor(color, dtype=dtype) if alpha == 1: - assert (out[:, mask] == color[:, None]).all() + assert_equal(out[:, mask], color[:, None]) elif alpha == 0: - assert (out[:, mask] == img[:, mask]).all() + assert_equal(out[:, mask], img[:, mask]) interpolated_color = (img[:, mask] * (1 - alpha) + color[:, None] * alpha) - max_diff = (out[:, mask] - interpolated_color).abs().max() - assert max_diff <= 1 + torch.testing.assert_close(out[:, mask], interpolated_color, rtol=0.0, atol=1.0) def test_draw_segmentation_masks_errors(): diff --git a/test/test_video_reader.py b/test/test_video_reader.py index 5b9b2184daf..d9326138397 100644 --- a/test/test_video_reader.py +++ b/test/test_video_reader.py @@ -11,6 +11,7 @@ from numpy.random import randint from torchvision.io import _HAS_VIDEO_OPT from common_utils import PY39_SKIP +from _assert_utils import assert_equal try: @@ -359,8 +360,7 @@ def compare_decoding_result(self, tv_result, ref_result, config=all_check_config ) self.assertAlmostEqual(mean_delta, 0, delta=1.0) - is_same = torch.all(torch.eq(vtimebase, ref_result.vtimebase)).item() - self.assertEqual(is_same, True) + assert_equal(vtimebase, ref_result.vtimebase) if ( config.check_aframes @@ -369,8 +369,7 @@ def compare_decoding_result(self, tv_result, ref_result, config=all_check_config ): """Audio stream is available and audio frame is required to return from decoder""" - is_same = torch.all(torch.eq(aframes, ref_result.aframes)).item() - self.assertEqual(is_same, True) + assert_equal(aframes, ref_result.aframes) if ( config.check_aframe_pts @@ -378,11 +377,9 @@ def compare_decoding_result(self, tv_result, ref_result, config=all_check_config and ref_result.aframe_pts.numel() > 0 ): """Audio stream is available""" - is_same = torch.all(torch.eq(aframe_pts, ref_result.aframe_pts)).item() - self.assertEqual(is_same, True) + assert_equal(aframe_pts, ref_result.aframe_pts) - is_same = torch.all(torch.eq(atimebase, ref_result.atimebase)).item() - self.assertEqual(is_same, True) + assert_equal(atimebase, 
ref_result.atimebase) @unittest.skip( "This stress test will iteratively decode the same set of videos." From bfbe19b4102642a4526e9fd8234c71c089020fe4 Mon Sep 17 00:00:00 2001 From: Philip Meier Date: Thu, 20 May 2021 16:27:57 +0200 Subject: [PATCH 02/16] revert some changes --- test/test_hub.py | 10 ++++++---- test/test_image.py | 2 +- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/test/test_hub.py b/test/test_hub.py index 7050baaab4c..96fcc664959 100644 --- a/test/test_hub.py +++ b/test/test_hub.py @@ -5,8 +5,6 @@ import sys import unittest -import torch.testing - def sum_of_model_parameters(model): s = 0 @@ -33,7 +31,9 @@ def test_load_from_github(self): 'resnet18', pretrained=True, progress=False) - torch.testing.assert_close(sum_of_model_parameters(hub_model).item(), SUM_OF_PRETRAINED_RESNET18_PARAMS) + self.assertAlmostEqual(sum_of_model_parameters(hub_model).item(), + SUM_OF_PRETRAINED_RESNET18_PARAMS, + places = 2) def test_set_dir(self): temp_dir = tempfile.gettempdir() @@ -43,7 +43,9 @@ def test_set_dir(self): 'resnet18', pretrained=True, progress=False) - torch.testing.assert_close(sum_of_model_parameters(hub_model).item(), SUM_OF_PRETRAINED_RESNET18_PARAMS) + self.assertAlmostEqual(sum_of_model_parameters(hub_model).item(), + SUM_OF_PRETRAINED_RESNET18_PARAMS, + places=2) self.assertTrue(os.path.exists(temp_dir + '/pytorch_vision_master')) shutil.rmtree(temp_dir + '/pytorch_vision_master') diff --git a/test/test_image.py b/test/test_image.py index da1f6a636eb..1d2eab5798c 100644 --- a/test/test_image.py +++ b/test/test_image.py @@ -293,7 +293,7 @@ def test_decode_jpeg_cuda(mode, img_path, scripted): img_nvjpeg = f(data, mode=mode, device='cuda').cpu() # Some difference expected between jpeg implementations - torch.testing.assert_close(img, img_nvjpeg, rtol=0.0, atol=2.0, check_stride=False) + torch.testing.assert_close(img, img_nvjpeg, rtol=0.0, atol=2.0) @needs_cuda From 09f86f484881c555ec90d836fd7ca9c8db0f0a5b Mon Sep 17 00:00:00 2001 From: Philip Meier Date: Thu, 20 May 2021 16:28:42 +0200 Subject: [PATCH 03/16] add todo --- test/_assert_utils.py | 1 + 1 file changed, 1 insertion(+) diff --git a/test/_assert_utils.py b/test/_assert_utils.py index 86273555657..e766e2df4b8 100644 --- a/test/_assert_utils.py +++ b/test/_assert_utils.py @@ -1,4 +1,5 @@ """This is a temporary module and should be removed as soon as torch.testing.assert_equal is supported.""" +# TODO: remove this as soon as torch.testing.assert_equal is supported import functools From 86402f0d0edfc9bba4c1c9f0753658de3dec1839 Mon Sep 17 00:00:00 2001 From: Philip Meier Date: Thu, 20 May 2021 16:34:30 +0200 Subject: [PATCH 04/16] flake8 --- test/test_hub.py | 2 +- test/test_image.py | 1 - test/test_models.py | 2 -- 3 files changed, 1 insertion(+), 4 deletions(-) diff --git a/test/test_hub.py b/test/test_hub.py index 96fcc664959..29ae90014d1 100644 --- a/test/test_hub.py +++ b/test/test_hub.py @@ -33,7 +33,7 @@ def test_load_from_github(self): progress=False) self.assertAlmostEqual(sum_of_model_parameters(hub_model).item(), SUM_OF_PRETRAINED_RESNET18_PARAMS, - places = 2) + places=2) def test_set_dir(self): temp_dir = tempfile.gettempdir() diff --git a/test/test_image.py b/test/test_image.py index 1d2eab5798c..c3d11f8ce6e 100644 --- a/test/test_image.py +++ b/test/test_image.py @@ -286,7 +286,6 @@ def test_write_file_non_ascii(self): def test_decode_jpeg_cuda(mode, img_path, scripted): if 'cmyk' in img_path: pytest.xfail("Decoding a CMYK jpeg isn't supported") - tester = ImageTester() data =
read_file(img_path) img = decode_image(data, mode=mode) f = torch.jit.script(decode_jpeg) if scripted else decode_jpeg diff --git a/test/test_models.py b/test/test_models.py index a9e26a3f159..a86392cf8b6 100644 --- a/test/test_models.py +++ b/test/test_models.py @@ -301,8 +301,6 @@ def test_memory_efficient_densenet(self): model2.eval() out2 = model2(x) - max_diff = (out1 - out2).abs().max() - self.assertTrue(num_params == num_grad) torch.testing.assert_close(out1, out2, rtol=0.0, atol=1e-5) From 48d32e648a56ae94b8a042957d2d1198950754c1 Mon Sep 17 00:00:00 2001 From: Nicolas Hug Date: Thu, 20 May 2021 16:16:25 +0100 Subject: [PATCH 05/16] Hopefully fixed test_functional_tensor --- test/common_utils.py | 2 +- test/test_functional_tensor.py | 7 +++++-- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/test/common_utils.py b/test/common_utils.py index cc7d9c611b8..c79cc73b91a 100644 --- a/test/common_utils.py +++ b/test/common_utils.py @@ -348,7 +348,7 @@ def compareTensorToPIL(self, tensor, pil_image, msg=None): pil_tensor = torch.as_tensor(np_pil_image.transpose((2, 0, 1))) if msg is None: msg = "tensor:\n{} \ndid not equal PIL tensor:\n{}".format(tensor, pil_tensor) - assert_equal(tensor.cpu(), pil_tensor, msg=msg) + assert_equal(tensor.cpu(), pil_tensor, check_stride=False, msg=msg) def approxEqualTensorToPIL(self, tensor, pil_image, tol=1e-5, msg=None, agg_method="mean", allowed_percentage_diff=None): diff --git a/test/test_functional_tensor.py b/test/test_functional_tensor.py index 65c04c685eb..b2be22f9794 100644 --- a/test/test_functional_tensor.py +++ b/test/test_functional_tensor.py @@ -410,6 +410,7 @@ def test_resized_crop(self): assert_equal( expected_out_tensor, out_tensor, + check_stride=False, msg="{} vs {}".format(expected_out_tensor[0, :10, :10], out_tensor[0, :10, :10]), ) @@ -454,6 +455,7 @@ def _test_affine_square_rotations(self, tensor, pil_img, scripted_affine): true_tensor, out_tensor, msg="{}\n{} vs \n{}".format(a, out_tensor[0, :5, :5], true_tensor[0, :5, :5]), + check_stride=False, ) if out_tensor.dtype != torch.uint8: @@ -603,7 +605,7 @@ def test_affine(self): with self.assertWarnsRegex(UserWarning, r"Argument fillcolor is deprecated and will be removed"): res1 = F.affine(pil_img, 45, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], fillcolor=10) res2 = F.affine(pil_img, 45, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], fill=10) - assert_equal(res1, res2) + assert res1 == res2 def _test_rotate_all_options(self, tensor, pil_img, scripted_rotate, centers): img_size = pil_img.size @@ -747,7 +749,8 @@ def test_gaussian_blur(self): for fn in [F.gaussian_blur, scripted_transform]: out = fn(tensor, kernel_size=ksize, sigma=sigma) torch.testing.assert_close( - out, true_out, rtol=0.0, atol=1.0, msg="{}, {}".format(ksize, sigma) + out, true_out, rtol=0.0, atol=1.0, check_stride=False, + msg="{}, {}".format(ksize, sigma) ) From 15b50e3d5a13dae61b926e95132b5afb14b14cc6 Mon Sep 17 00:00:00 2001 From: Nicolas Hug Date: Thu, 20 May 2021 16:31:37 +0100 Subject: [PATCH 06/16] hopefully fixed test_ops --- test/test_ops.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/test/test_ops.py b/test/test_ops.py index 180ef571bba..88bcf7f540f 100644 --- a/test/test_ops.py +++ b/test/test_ops.py @@ -946,7 +946,7 @@ class BoxAreaTester(unittest.TestCase): def test_box_area(self): def area_check(box, expected, tolerance=1e-4): out = ops.box_area(box) - torch.testing.assert_close(out, expected, rtol=0.0, atol=tolerance) + torch.testing.assert_close(out, 
expected, rtol=0.0, check_dtype=False, atol=tolerance) # Check for int boxes for dtype in [torch.int8, torch.int16, torch.int32, torch.int64]: @@ -974,7 +974,7 @@ class BoxIouTester(unittest.TestCase): def test_iou(self): def iou_check(box, expected, tolerance=1e-4): out = ops.box_iou(box, box) - torch.testing.assert_close(out, expected, rtol=0.0, atol=tolerance) + torch.testing.assert_close(out, expected, rtol=0.0, check_dtype=False, atol=tolerance) # Check for int boxes for dtype in [torch.int16, torch.int32, torch.int64]: @@ -995,7 +995,7 @@ class GenBoxIouTester(unittest.TestCase): def test_gen_iou(self): def gen_iou_check(box, expected, tolerance=1e-4): out = ops.generalized_box_iou(box, box) - torch.testing.assert_close(out, expected, rtol=0.0, atol=tolerance) + torch.testing.assert_close(out, expected, rtol=0.0, check_dtype=False, atol=tolerance) # Check for int boxes for dtype in [torch.int16, torch.int32, torch.int64]: From 61874ac3ef1b8c5ea836736928abead8e9c19d14 Mon Sep 17 00:00:00 2001 From: Nicolas Hug Date: Thu, 20 May 2021 17:01:16 +0100 Subject: [PATCH 07/16] Fix test_utils --- test/test_utils.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/test/test_utils.py b/test/test_utils.py index 2bb1a20cc0e..e9bafdeab42 100644 --- a/test/test_utils.py +++ b/test/test_utils.py @@ -198,11 +198,11 @@ def test_draw_segmentation_masks(colors, alpha): color = torch.tensor(color, dtype=dtype) if alpha == 1: - assert_equal(out[:, mask], color[:, None]) + assert (out[:, mask] == color[:, None]).all() elif alpha == 0: - assert_equal(out[:, mask], img[:, mask]) + assert (out[:, mask] == img[:, mask]).all() - interpolated_color = (img[:, mask] * (1 - alpha) + color[:, None] * alpha) + interpolated_color = (img[:, mask] * (1 - alpha) + color[:, None] * alpha).to(dtype) torch.testing.assert_close(out[:, mask], interpolated_color, rtol=0.0, atol=1.0) From 30f20a39309cb1ddaf9f968611321b560b52e0f1 Mon Sep 17 00:00:00 2001 From: Nicolas Hug Date: Thu, 20 May 2021 17:08:13 +0100 Subject: [PATCH 08/16] revert unwanted changes to test_image --- test/test_image.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/test/test_image.py b/test/test_image.py index 06703909f85..ebc47fde9e4 100644 --- a/test/test_image.py +++ b/test/test_image.py @@ -65,7 +65,8 @@ def test_decode_jpeg(self): # Permit a small variation on pixel values to account for implementation # differences between Pillow and LibJPEG. 
- torch.testing.assert_close(img_ljpeg, img_pil, rtol=0.0, atol=2.0) + abs_mean_diff = (img_ljpeg.type(torch.float32) - img_pil).abs().mean().item() + self.assertTrue(abs_mean_diff < 2) with self.assertRaisesRegex(RuntimeError, "Expected a non empty 1-dimensional tensor"): decode_jpeg(torch.empty((100, 1), dtype=torch.uint8)) @@ -293,13 +294,14 @@ def test_write_file_non_ascii(self): def test_decode_jpeg_cuda(mode, img_path, scripted): if 'cmyk' in img_path: pytest.xfail("Decoding a CMYK jpeg isn't supported") + tester = ImageTester() data = read_file(img_path) img = decode_image(data, mode=mode) f = torch.jit.script(decode_jpeg) if scripted else decode_jpeg - img_nvjpeg = f(data, mode=mode, device='cuda').cpu() + img_nvjpeg = f(data, mode=mode, device='cuda') # Some difference expected between jpeg implementations - torch.testing.assert_close(img, img_nvjpeg, rtol=0.0, atol=2.0) + tester.assertTrue((img.float() - img_nvjpeg.cpu().float()).abs().mean() < 2) @needs_cuda From 3a29ae34cd5158ed067b3ddd7d0ab5c48bad6ca1 Mon Sep 17 00:00:00 2001 From: Nicolas Hug Date: Thu, 20 May 2021 18:19:29 +0100 Subject: [PATCH 09/16] maybe fixed test_transforms --- test/test_transforms.py | 62 +++++++++++++++++++++++------------------ 1 file changed, 35 insertions(+), 27 deletions(-) diff --git a/test/test_transforms.py b/test/test_transforms.py index da8813a20f9..f349205be74 100644 --- a/test/test_transforms.py +++ b/test/test_transforms.py @@ -104,7 +104,8 @@ def test_center_crop_2(self): # Ensure output for PIL and Tensor are equal assert_equal( - output_tensor, output_pil, msg="image_size: {} crop_size: {}".format(input_image_size, crop_size) + output_tensor, output_pil, check_stride=False, + msg="image_size: {} crop_size: {}".format(input_image_size, crop_size) ) # Check if content in center of both image and cropped output is same. 
@@ -129,7 +130,8 @@ def test_center_crop_2(self): ] assert_equal( - output_center, img_center, msg="image_size: {} crop_size: {}".format(input_image_size, crop_size) + output_center, img_center, check_stride=False, + msg="image_size: {} crop_size: {}".format(input_image_size, crop_size) ) def test_five_crop(self): @@ -417,8 +419,10 @@ def test_pad(self): # to the pad value fill_v = fill / 255 eps = 1e-5 - torch.testing.assert_close(result[:, :padding, :], fill_v, rtol=0.0, atol=eps) - torch.testing.assert_close(result[:, :, :padding], fill_v, rtol=0.0, atol=eps) + h_padded = result[:, :padding, :] + w_padded = result[:, :, :padding] + torch.testing.assert_close(h_padded, torch.full_like(h_padded, fill_value=fill_v), check_stride=False, rtol=0.0, atol=eps) + torch.testing.assert_close(w_padded, torch.full_like(w_padded, fill_value=fill_v), check_stride=False, rtol=0.0, atol=eps) self.assertRaises(ValueError, transforms.Pad(padding, fill=(1, 2)), transforms.ToPILImage()(img)) @@ -451,7 +455,7 @@ def test_pad_with_non_constant_padding_modes(self): # First 6 elements of leftmost edge in the middle of the image, values are in order: # edge_pad, edge_pad, edge_pad, constant_pad, constant value added to leftmost edge, 0 edge_middle_slice = np.asarray(edge_padded_img).transpose(2, 0, 1)[0][17][:6] - assert_equal(edge_middle_slice, np.asarray([200, 200, 200, 200, 1, 0])) + assert_equal(edge_middle_slice, np.asarray([200, 200, 200, 200, 1, 0]), check_dtype=False, check_stride=False) self.assertEqual(transforms.ToTensor()(edge_padded_img).size(), (3, 35, 35)) # Pad 3 to left/right, 2 to top/bottom @@ -459,7 +463,7 @@ def test_pad_with_non_constant_padding_modes(self): # First 6 elements of leftmost edge in the middle of the image, values are in order: # reflect_pad, reflect_pad, reflect_pad, constant_pad, constant value added to leftmost edge, 0 reflect_middle_slice = np.asarray(reflect_padded_img).transpose(2, 0, 1)[0][17][:6] - assert_equal(reflect_middle_slice, np.asarray([0, 0, 1, 200, 1, 0])) + assert_equal(reflect_middle_slice, np.asarray([0, 0, 1, 200, 1, 0]), check_dtype=False, check_stride=False) self.assertEqual(transforms.ToTensor()(reflect_padded_img).size(), (3, 33, 35)) # Pad 3 to left, 2 to top, 2 to right, 1 to bottom @@ -467,7 +471,7 @@ def test_pad_with_non_constant_padding_modes(self): # First 6 elements of leftmost edge in the middle of the image, values are in order: # sym_pad, sym_pad, sym_pad, constant_pad, constant value added to leftmost edge, 0 symmetric_middle_slice = np.asarray(symmetric_padded_img).transpose(2, 0, 1)[0][17][:6] - assert_equal(symmetric_middle_slice, np.asarray([0, 1, 200, 200, 1, 0])) + assert_equal(symmetric_middle_slice, np.asarray([0, 1, 200, 200, 1, 0]), check_dtype=False, check_stride=False) self.assertEqual(transforms.ToTensor()(symmetric_padded_img).size(), (3, 32, 34)) # Check negative padding explicitly for symmetric case, since it is not @@ -476,8 +480,8 @@ def test_pad_with_non_constant_padding_modes(self): symmetric_padded_img_neg = F.pad(img, (-1, 2, 3, -3), padding_mode='symmetric') symmetric_neg_middle_left = np.asarray(symmetric_padded_img_neg).transpose(2, 0, 1)[0][17][:3] symmetric_neg_middle_right = np.asarray(symmetric_padded_img_neg).transpose(2, 0, 1)[0][17][-4:] - assert_equal(symmetric_neg_middle_left, np.asarray([1, 0, 0])) - assert_equal(symmetric_neg_middle_right, np.asarray([200, 200, 0, 0])) + assert_equal(symmetric_neg_middle_left, np.asarray([1, 0, 0]), check_dtype=False, check_stride=False) + 
assert_equal(symmetric_neg_middle_right, np.asarray([200, 200, 0, 0]), check_dtype=False, check_stride=False) self.assertEqual(transforms.ToTensor()(symmetric_padded_img_neg).size(), (3, 28, 31)) def test_pad_raises_with_invalid_pad_sequence_len(self): @@ -616,23 +620,25 @@ def test_to_tensor(self): input_data = torch.ByteTensor(channels, height, width).random_(0, 255).float().div_(255) img = transforms.ToPILImage()(input_data) output = trans(img) - torch.testing.assert_close(output, input_data) + torch.testing.assert_close(output, input_data, check_stride=False) ndarray = np.random.randint(low=0, high=255, size=(height, width, channels)).astype(np.uint8) output = trans(ndarray) expected_output = ndarray.transpose((2, 0, 1)) / 255.0 - torch.testing.assert_close(output, expected_output) + torch.testing.assert_close(output, torch.as_tensor(expected_output), + check_stride=False, check_dtype=False) ndarray = np.random.rand(height, width, channels).astype(np.float32) output = trans(ndarray) expected_output = ndarray.transpose((2, 0, 1)) - torch.testing.assert_close(output, expected_output) + torch.testing.assert_close(output, torch.as_tensor(expected_output), + check_stride=False, check_dtype=False) # separate test for mode '1' PIL images input_data = torch.ByteTensor(1, height, width).bernoulli_() img = transforms.ToPILImage()(input_data.mul(255)).convert('1') output = trans(img) - torch.testing.assert_close(input_data, output) + torch.testing.assert_close(input_data, output, check_dtype=False, check_stride=False) def test_to_tensor_with_other_default_dtypes(self): current_def_dtype = torch.get_default_dtype() @@ -798,7 +804,7 @@ def test_pil_to_tensor(self): input_data = torch.ByteTensor(channels, height, width).random_(0, 255) img = transforms.ToPILImage()(input_data) output = trans(img) - torch.testing.assert_close(input_data, output) + torch.testing.assert_close(input_data, output, check_stride=False) input_data = np.random.randint(low=0, high=255, size=(height, width, channels)).astype(np.uint8) img = transforms.ToPILImage()(input_data) @@ -810,13 +816,13 @@ def test_pil_to_tensor(self): img = transforms.ToPILImage()(input_data) # CHW -> HWC and (* 255).byte() output = trans(img) # HWC -> CHW expected_output = (input_data * 255).byte() - torch.testing.assert_close(output, expected_output) + torch.testing.assert_close(output, expected_output, check_stride=False) # separate test for mode '1' PIL images input_data = torch.ByteTensor(1, height, width).bernoulli_() img = transforms.ToPILImage()(input_data.mul(255)).convert('1') output = trans(img) - torch.testing.assert_close(input_data, output) + torch.testing.assert_close(input_data, output, check_stride=False, check_dtype=False) @unittest.skipIf(accimage is None, 'accimage not available') def test_accimage_pil_to_tensor(self): @@ -882,7 +888,7 @@ def test_1_channel_tensor_to_pil_image(self): for transform in [transforms.ToPILImage(), transforms.ToPILImage(mode=mode)]: img = transform(img_data) self.assertEqual(img.mode, mode) - torch.testing.assert_close(expected_output, to_tensor(img).numpy()) + torch.testing.assert_close(expected_output, to_tensor(img).numpy(), check_stride=False) # 'F' mode for torch.FloatTensor img_F_mode = transforms.ToPILImage(mode='F')(img_data_float) self.assertEqual(img_F_mode.mode, 'F') @@ -914,7 +920,7 @@ def verify_img_data(img_data, mode): self.assertEqual(img.mode, mode) split = img.split() for i in range(2): - torch.testing.assert_close(img_data[:, :, i], np.asarray(split[i])) + 
torch.testing.assert_close(img_data[:, :, i], np.asarray(split[i]), check_stride=False) img_data = torch.ByteTensor(4, 4, 2).random_(0, 255).numpy() for mode in [None, 'LA']: @@ -987,7 +993,7 @@ def verify_img_data(img_data, mode): self.assertEqual(img.mode, mode) split = img.split() for i in range(3): - torch.testing.assert_close(img_data[:, :, i], np.asarray(split[i])) + torch.testing.assert_close(img_data[:, :, i], np.asarray(split[i]), check_stride=False) img_data = torch.ByteTensor(4, 4, 3).random_(0, 255).numpy() for mode in [None, 'RGB', 'HSV', 'YCbCr']: @@ -1036,7 +1042,7 @@ def verify_img_data(img_data, mode): self.assertEqual(img.mode, mode) split = img.split() for i in range(4): - torch.testing.assert_close(img_data[:, :, i], np.asarray(split[i])) + torch.testing.assert_close(img_data[:, :, i], np.asarray(split[i]), check_stride=False) img_data = torch.ByteTensor(4, 4, 4).random_(0, 255).numpy() for mode in [None, 'RGBA', 'CMYK', 'RGBX']: @@ -1067,7 +1073,7 @@ def test_2d_tensor_to_pil_image(self): for transform in [transforms.ToPILImage(), transforms.ToPILImage(mode=mode)]: img = transform(img_data) self.assertEqual(img.mode, mode) - torch.testing.assert_close(expected_output, to_tensor(img).numpy()) + np.testing.assert_allclose(expected_output, to_tensor(img).numpy()[0]) def test_2d_ndarray_to_pil_image(self): img_data_float = torch.Tensor(4, 4).uniform_().numpy() @@ -1081,7 +1087,7 @@ def test_2d_ndarray_to_pil_image(self): for transform in [transforms.ToPILImage(), transforms.ToPILImage(mode=mode)]: img = transform(img_data) self.assertEqual(img.mode, mode) - torch.testing.assert_close(img_data, img) + np.testing.assert_allclose(img_data, img) def test_tensor_bad_types_to_pil_image(self): with self.assertRaisesRegex(ValueError, r'pic should be 2/3 dimensional. 
Got \d+ dimensions.'): @@ -1453,8 +1459,10 @@ def test_linear_transformation(self): cov += np.dot(xwhite, xwhite.T) / num_features mean += np.sum(xwhite) / num_features # if rtol for std = 1e-3 then rtol for cov = 2e-3 as std**2 = cov - torch.testing.assert_close(cov / num_samples, np.identity(1), rtol=2e-3, atol=1e-8, msg="cov not close to 1") - torch.testing.assert_close(mean / num_samples, 0, rtol=1e-3, atol=1e-8, msg="mean not close to 0") + torch.testing.assert_close(cov / num_samples, np.identity(1), rtol=2e-3, atol=1e-8, check_dtype=False, + msg="cov not close to 1") + torch.testing.assert_close(mean / num_samples, 0, rtol=1e-3, atol=1e-8, check_dtype=False, + msg="mean not close to 0") # Checking if LinearTransformation can be printed as string whitening.__repr__() @@ -1743,7 +1751,7 @@ def test_to_grayscale(self): self.assertEqual(gray_np_2.shape, tuple(x_shape), 'should be 3 channel') assert_equal(gray_np_2[:, :, 0], gray_np_2[:, :, 1]) assert_equal(gray_np_2[:, :, 1], gray_np_2[:, :, 2]) - assert_equal(gray_np, gray_np_2[:, :, 0]) + assert_equal(gray_np, gray_np_2[:, :, 0], check_stride=False) # Case 3: 1 channel grayscale -> 1 channel grayscale trans3 = transforms.Grayscale(num_output_channels=1) @@ -1761,7 +1769,7 @@ def test_to_grayscale(self): self.assertEqual(gray_np_4.shape, tuple(x_shape), 'should be 3 channel') assert_equal(gray_np_4[:, :, 0], gray_np_4[:, :, 1]) assert_equal(gray_np_4[:, :, 1], gray_np_4[:, :, 2]) - assert_equal(gray_np, gray_np_4[:, :, 0]) + assert_equal(gray_np, gray_np_4[:, :, 0], check_stride=False) # Checking if Grayscale can be printed as string trans4.__repr__() @@ -1830,7 +1838,7 @@ def test_random_grayscale(self): self.assertEqual(gray_np_2.shape, tuple(x_shape), 'should be 3 channel') assert_equal(gray_np_2[:, :, 0], gray_np_2[:, :, 1]) assert_equal(gray_np_2[:, :, 1], gray_np_2[:, :, 2]) - assert_equal(gray_np, gray_np_2[:, :, 0]) + assert_equal(gray_np, gray_np_2[:, :, 0], check_stride=False) # Case 3b: RGB -> 3 channel grayscale (unchanged) trans2 = transforms.RandomGrayscale(p=0.0) From 863f144e727df862fe800357af65365b4a9bff93 Mon Sep 17 00:00:00 2001 From: Philip Meier Date: Fri, 21 May 2021 08:24:44 +0200 Subject: [PATCH 10/16] fix test_datasets_video_utils --- test/test_datasets_video_utils.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/test/test_datasets_video_utils.py b/test/test_datasets_video_utils.py index 9cd3b775934..a25ad8f75ba 100644 --- a/test/test_datasets_video_utils.py +++ b/test/test_datasets_video_utils.py @@ -41,7 +41,7 @@ def test_unfold(self): [0, 1, 2], [3, 4, 5], ]) - assert_equal(r, expected) + assert_equal(r, expected, check_stride=False) r = unfold(a, 3, 2, 1) expected = torch.tensor([ @@ -49,14 +49,14 @@ def test_unfold(self): [2, 3, 4], [4, 5, 6] ]) - assert_equal(r, expected) + assert_equal(r, expected, check_stride=False) r = unfold(a, 3, 2, 2) expected = torch.tensor([ [0, 2, 4], [2, 4, 6], ]) - assert_equal(r, expected) + assert_equal(r, expected, check_stride=False) @unittest.skipIf(not io.video._av_available(), "this test requires av") def test_video_clips(self): From c8a5afaf5a63d209ff34834fe3ee0ba854e8a951 Mon Sep 17 00:00:00 2001 From: Philip Meier Date: Fri, 21 May 2021 08:24:59 +0200 Subject: [PATCH 11/16] fix test_transforms --- test/test_transforms.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/test_transforms.py b/test/test_transforms.py index 7925d4c9115..255cca8eae0 100644 --- a/test/test_transforms.py +++ b/test/test_transforms.py @@
-821,8 +821,8 @@ def test_pil_to_tensor(self): # separate test for mode '1' PIL images input_data = torch.ByteTensor(1, height, width).bernoulli_() img = transforms.ToPILImage()(input_data.mul(255)).convert('1') - output = trans(img) - torch.testing.assert_close(input_data, output, check_stride=False, check_dtype=False) + output = trans(img).view(torch.uint8).bool().to(torch.uint8) + torch.testing.assert_close(input_data, output, check_stride=False) @unittest.skipIf(accimage is None, 'accimage not available') def test_accimage_pil_to_tensor(self): From 93614f0fbccfffcbd21dbb33129a8572c43b2bcb Mon Sep 17 00:00:00 2001 From: Philip Meier Date: Fri, 21 May 2021 08:27:09 +0200 Subject: [PATCH 12/16] flake8 --- test/test_transforms.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/test/test_transforms.py b/test/test_transforms.py index 7925d4c9115..255cca8eae0 100644 --- a/test/test_transforms.py +++ b/test/test_transforms.py @@ -421,8 +421,12 @@ def test_pad(self): eps = 1e-5 h_padded = result[:, :padding, :] w_padded = result[:, :, :padding] - torch.testing.assert_close(h_padded, torch.full_like(h_padded, fill_value=fill_v), check_stride=False, rtol=0.0, atol=eps) - torch.testing.assert_close(w_padded, torch.full_like(w_padded, fill_value=fill_v), check_stride=False, rtol=0.0, atol=eps) + torch.testing.assert_close( + h_padded, torch.full_like(h_padded, fill_value=fill_v), check_stride=False, rtol=0.0, atol=eps + ) + torch.testing.assert_close( + w_padded, torch.full_like(w_padded, fill_value=fill_v), check_stride=False, rtol=0.0, atol=eps + ) self.assertRaises(ValueError, transforms.Pad(padding, fill=(1, 2)), transforms.ToPILImage()(img)) From 0b237c7f8cecc672d2aac36c5953f73c9b6a8a62 Mon Sep 17 00:00:00 2001 From: Nicolas Hug Date: Fri, 21 May 2021 09:26:35 +0100 Subject: [PATCH 13/16] use cu102 see if the nightlies are actual nightlies? 
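For context: .circleci/config.yml is generated from .circleci/regenerate.py, which is why the generator is edited here and the rendered config only catches up in the follow-up commit. A rough sketch of the branch being flipped; gen_filter_branch_tree and the job keys are taken from the diff below, while the function name unittest_job and the final YAML rendering step are assumed for illustration:

    # Illustrative sketch only; the real helpers live in .circleci/regenerate.py.
    def gen_filter_branch_tree(*branches):
        # stand-in: restricts a job to the given branches
        return {"branches": {"only": list(branches)}}

    def unittest_job(device_type, python_version):
        job = {"python_version": python_version}
        if device_type == "gpu":
            if python_version != "3.8":
                # GPU jobs for non-default Pythons run only on master/nightly
                job["filters"] = gen_filter_branch_tree("master", "nightly")
            job["cu_version"] = "cu102"  # this patch bumps the pin from cu101
        else:
            job["cu_version"] = "cpu"
        return job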
--- .circleci/regenerate.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.circleci/regenerate.py b/.circleci/regenerate.py index 5fea42c5b47..76efc561d77 100755 --- a/.circleci/regenerate.py +++ b/.circleci/regenerate.py @@ -234,7 +234,7 @@ def unittest_workflows(indentation=6): if device_type == 'gpu': if python_version != "3.8": job['filters'] = gen_filter_branch_tree('master', 'nightly') - job['cu_version'] = 'cu101' + job['cu_version'] = 'cu102' else: job['cu_version'] = 'cpu' From c2ace863769a0a1e7f0fe4540412b5414e701307 Mon Sep 17 00:00:00 2001 From: Nicolas Hug Date: Fri, 21 May 2021 09:27:03 +0100 Subject: [PATCH 14/16] obviously forgot to call regenerate.py --- .circleci/config.yml | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index fe0c913674d..5568be97e52 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -1468,7 +1468,7 @@ workflows: name: unittest_linux_cpu_py3.9 python_version: '3.9' - unittest_linux_gpu: - cu_version: cu101 + cu_version: cu102 filters: branches: only: @@ -1477,7 +1477,7 @@ workflows: name: unittest_linux_gpu_py3.6 python_version: '3.6' - unittest_linux_gpu: - cu_version: cu101 + cu_version: cu102 filters: branches: only: @@ -1486,11 +1486,11 @@ workflows: name: unittest_linux_gpu_py3.7 python_version: '3.7' - unittest_linux_gpu: - cu_version: cu101 + cu_version: cu102 name: unittest_linux_gpu_py3.8 python_version: '3.8' - unittest_linux_gpu: - cu_version: cu101 + cu_version: cu102 filters: branches: only: @@ -1515,7 +1515,7 @@ workflows: name: unittest_windows_cpu_py3.9 python_version: '3.9' - unittest_windows_gpu: - cu_version: cu101 + cu_version: cu102 filters: branches: only: @@ -1524,7 +1524,7 @@ workflows: name: unittest_windows_gpu_py3.6 python_version: '3.6' - unittest_windows_gpu: - cu_version: cu101 + cu_version: cu102 filters: branches: only: @@ -1533,11 +1533,11 @@ workflows: name: unittest_windows_gpu_py3.7 python_version: '3.7' - unittest_windows_gpu: - cu_version: cu101 + cu_version: cu102 name: unittest_windows_gpu_py3.8 python_version: '3.8' - unittest_windows_gpu: - cu_version: cu101 + cu_version: cu102 filters: branches: only: From d78226a4d658e8319e9d3933cbc13eae7a13ce9c Mon Sep 17 00:00:00 2001 From: Nicolas Hug Date: Fri, 21 May 2021 09:45:31 +0100 Subject: [PATCH 15/16] not as obvious, reverting --- .circleci/config.yml | 16 ++++++++-------- .circleci/regenerate.py | 2 +- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 5568be97e52..fe0c913674d 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -1468,7 +1468,7 @@ workflows: name: unittest_linux_cpu_py3.9 python_version: '3.9' - unittest_linux_gpu: - cu_version: cu102 + cu_version: cu101 filters: branches: only: @@ -1477,7 +1477,7 @@ workflows: name: unittest_linux_gpu_py3.6 python_version: '3.6' - unittest_linux_gpu: - cu_version: cu102 + cu_version: cu101 filters: branches: only: @@ -1486,11 +1486,11 @@ workflows: name: unittest_linux_gpu_py3.7 python_version: '3.7' - unittest_linux_gpu: - cu_version: cu102 + cu_version: cu101 name: unittest_linux_gpu_py3.8 python_version: '3.8' - unittest_linux_gpu: - cu_version: cu102 + cu_version: cu101 filters: branches: only: @@ -1515,7 +1515,7 @@ workflows: name: unittest_windows_cpu_py3.9 python_version: '3.9' - unittest_windows_gpu: - cu_version: cu102 + cu_version: cu101 filters: branches: only: @@ -1524,7 +1524,7 @@ workflows: name: 
unittest_windows_gpu_py3.6 python_version: '3.6' - unittest_windows_gpu: - cu_version: cu102 + cu_version: cu101 filters: branches: only: @@ -1533,11 +1533,11 @@ workflows: name: unittest_windows_gpu_py3.7 python_version: '3.7' - unittest_windows_gpu: - cu_version: cu102 + cu_version: cu101 name: unittest_windows_gpu_py3.8 python_version: '3.8' - unittest_windows_gpu: - cu_version: cu102 + cu_version: cu101 filters: branches: only: diff --git a/.circleci/regenerate.py b/.circleci/regenerate.py index 76efc561d77..5fea42c5b47 100755 --- a/.circleci/regenerate.py +++ b/.circleci/regenerate.py @@ -234,7 +234,7 @@ def unittest_workflows(indentation=6): if device_type == 'gpu': if python_version != "3.8": job['filters'] = gen_filter_branch_tree('master', 'nightly') - job['cu_version'] = 'cu102' + job['cu_version'] = 'cu101' else: job['cu_version'] = 'cpu' From 7d46f4a7a688dffdf45ae2f4e5099913430be0df Mon Sep 17 00:00:00 2001 From: Nicolas Hug Date: Fri, 21 May 2021 11:59:48 +0100 Subject: [PATCH 16/16] revert everything but test_utils --- test/common_utils.py | 7 +- test/test_datasets_samplers.py | 21 ++- test/test_datasets_video_utils.py | 31 ++-- test/test_functional_tensor.py | 83 ++++----- test/test_image.py | 11 +- test/test_io.py | 26 ++- test/test_models.py | 8 +- test/test_models_detection_anchor_utils.py | 9 +- test/test_models_detection_utils.py | 5 +- test/test_onnx.py | 9 +- test/test_ops.py | 83 +++++---- test/test_transforms.py | 185 ++++++++++----------- test/test_transforms_tensor.py | 29 ++-- test/test_transforms_video.py | 3 +- test/test_video_reader.py | 13 +- 15 files changed, 258 insertions(+), 265 deletions(-) diff --git a/test/common_utils.py b/test/common_utils.py index c79cc73b91a..1f48e8c649b 100644 --- a/test/common_utils.py +++ b/test/common_utils.py @@ -20,8 +20,6 @@ import numpy as np from PIL import Image -from _assert_utils import assert_equal - IS_PY39 = sys.version_info.major == 3 and sys.version_info.minor == 9 PY39_SEGFAULT_SKIP_MSG = "Segmentation fault with Python 3.9, see https://github.com/pytorch/vision/issues/3367" PY39_SKIP = unittest.skipIf(IS_PY39, PY39_SEGFAULT_SKIP_MSG) @@ -141,8 +139,7 @@ def assertExpected(self, output, name, prec=None): raise RuntimeError("The output for {}, is larger than 50kb".format(filename)) else: expected = torch.load(expected_file) - rtol = atol = prec or self.precision - torch.testing.assert_close(output, expected, rtol=rtol, atol=atol) + self.assertEqual(output, expected, prec=prec) def assertEqual(self, x, y, prec=None, message='', allow_inf=False): """ @@ -348,7 +345,7 @@ def compareTensorToPIL(self, tensor, pil_image, msg=None): pil_tensor = torch.as_tensor(np_pil_image.transpose((2, 0, 1))) if msg is None: msg = "tensor:\n{} \ndid not equal PIL tensor:\n{}".format(tensor, pil_tensor) - assert_equal(tensor.cpu(), pil_tensor, check_stride=False, msg=msg) + self.assertTrue(tensor.cpu().equal(pil_tensor), msg) def approxEqualTensorToPIL(self, tensor, pil_image, tol=1e-5, msg=None, agg_method="mean", allowed_percentage_diff=None): diff --git a/test/test_datasets_samplers.py b/test/test_datasets_samplers.py index 10d8704dbb1..e76f4f9d007 100644 --- a/test/test_datasets_samplers.py +++ b/test/test_datasets_samplers.py @@ -14,7 +14,6 @@ from torchvision import get_video_backend from common_utils import get_tmp_dir -from _assert_utils import assert_equal @contextlib.contextmanager @@ -48,8 +47,8 @@ def test_random_clip_sampler(self): indices = torch.tensor(list(iter(sampler))) videos = torch.div(indices, 5, 
rounding_mode='floor') v_idxs, count = torch.unique(videos, return_counts=True) - assert_equal(v_idxs, torch.tensor([0, 1, 2])) - assert_equal(count, torch.tensor([3, 3, 3])) + self.assertTrue(v_idxs.equal(torch.tensor([0, 1, 2]))) + self.assertTrue(count.equal(torch.tensor([3, 3, 3]))) def test_random_clip_sampler_unequal(self): with get_list_of_videos(num_videos=3, sizes=[10, 25, 25]) as video_list: @@ -65,8 +64,8 @@ def test_random_clip_sampler_unequal(self): indices = torch.tensor(indices) - 2 videos = torch.div(indices, 5, rounding_mode='floor') v_idxs, count = torch.unique(videos, return_counts=True) - assert_equal(v_idxs, torch.tensor([0, 1])) - assert_equal(count, torch.tensor([3, 3])) + self.assertTrue(v_idxs.equal(torch.tensor([0, 1]))) + self.assertTrue(count.equal(torch.tensor([3, 3]))) def test_uniform_clip_sampler(self): with get_list_of_videos(num_videos=3, sizes=[25, 25, 25]) as video_list: @@ -76,9 +75,9 @@ def test_uniform_clip_sampler(self): indices = torch.tensor(list(iter(sampler))) videos = torch.div(indices, 5, rounding_mode='floor') v_idxs, count = torch.unique(videos, return_counts=True) - assert_equal(v_idxs, torch.tensor([0, 1, 2])) - assert_equal(count, torch.tensor([3, 3, 3])) - assert_equal(indices, torch.tensor([0, 2, 4, 5, 7, 9, 10, 12, 14])) + self.assertTrue(v_idxs.equal(torch.tensor([0, 1, 2]))) + self.assertTrue(count.equal(torch.tensor([3, 3, 3]))) + self.assertTrue(indices.equal(torch.tensor([0, 2, 4, 5, 7, 9, 10, 12, 14]))) def test_uniform_clip_sampler_insufficient_clips(self): with get_list_of_videos(num_videos=3, sizes=[10, 25, 25]) as video_list: @@ -86,7 +85,7 @@ def test_uniform_clip_sampler_insufficient_clips(self): sampler = UniformClipSampler(video_clips, 3) self.assertEqual(len(sampler), 3 * 3) indices = torch.tensor(list(iter(sampler))) - assert_equal(indices, torch.tensor([0, 0, 1, 2, 4, 6, 7, 9, 11])) + self.assertTrue(indices.equal(torch.tensor([0, 0, 1, 2, 4, 6, 7, 9, 11]))) def test_distributed_sampler_and_uniform_clip_sampler(self): with get_list_of_videos(num_videos=3, sizes=[25, 25, 25]) as video_list: @@ -101,7 +100,7 @@ def test_distributed_sampler_and_uniform_clip_sampler(self): ) indices = torch.tensor(list(iter(distributed_sampler_rank0))) self.assertEqual(len(distributed_sampler_rank0), 6) - assert_equal(indices, torch.tensor([0, 2, 4, 10, 12, 14])) + self.assertTrue(indices.equal(torch.tensor([0, 2, 4, 10, 12, 14]))) distributed_sampler_rank1 = DistributedSampler( clip_sampler, @@ -111,7 +110,7 @@ def test_distributed_sampler_and_uniform_clip_sampler(self): ) indices = torch.tensor(list(iter(distributed_sampler_rank1))) self.assertEqual(len(distributed_sampler_rank1), 6) - assert_equal(indices, torch.tensor([5, 7, 9, 0, 2, 4])) + self.assertTrue(indices.equal(torch.tensor([5, 7, 9, 0, 2, 4]))) if __name__ == '__main__': diff --git a/test/test_datasets_video_utils.py b/test/test_datasets_video_utils.py index a25ad8f75ba..694214544f7 100644 --- a/test/test_datasets_video_utils.py +++ b/test/test_datasets_video_utils.py @@ -7,7 +7,6 @@ from torchvision.datasets.video_utils import VideoClips, unfold from common_utils import get_tmp_dir -from _assert_utils import assert_equal @contextlib.contextmanager @@ -41,7 +40,7 @@ def test_unfold(self): [0, 1, 2], [3, 4, 5], ]) - assert_equal(r, expected, check_stride=False) + self.assertTrue(r.equal(expected)) r = unfold(a, 3, 2, 1) expected = torch.tensor([ @@ -49,14 +48,14 @@ def test_unfold(self): [2, 3, 4], [4, 5, 6] ]) - assert_equal(r, expected, check_stride=False) + 
self.assertTrue(r.equal(expected)) r = unfold(a, 3, 2, 2) expected = torch.tensor([ [0, 2, 4], [2, 4, 6], ]) - assert_equal(r, expected, check_stride=False) + self.assertTrue(r.equal(expected)) @unittest.skipIf(not io.video._av_available(), "this test requires av") def test_video_clips(self): @@ -65,22 +64,22 @@ def test_video_clips(self): self.assertEqual(video_clips.num_clips(), 1 + 2 + 3) for i, (v_idx, c_idx) in enumerate([(0, 0), (1, 0), (1, 1), (2, 0), (2, 1), (2, 2)]): video_idx, clip_idx = video_clips.get_clip_location(i) - assert_equal(video_idx, v_idx) - assert_equal(clip_idx, c_idx) + self.assertEqual(video_idx, v_idx) + self.assertEqual(clip_idx, c_idx) video_clips = VideoClips(video_list, 6, 6) self.assertEqual(video_clips.num_clips(), 0 + 1 + 2) for i, (v_idx, c_idx) in enumerate([(1, 0), (2, 0), (2, 1)]): video_idx, clip_idx = video_clips.get_clip_location(i) - assert_equal(video_idx, v_idx) - assert_equal(clip_idx, c_idx) + self.assertEqual(video_idx, v_idx) + self.assertEqual(clip_idx, c_idx) video_clips = VideoClips(video_list, 6, 1) self.assertEqual(video_clips.num_clips(), 0 + (10 - 6 + 1) + (15 - 6 + 1)) for i, v_idx, c_idx in [(0, 1, 0), (4, 1, 4), (5, 2, 0), (6, 2, 1)]: video_idx, clip_idx = video_clips.get_clip_location(i) - assert_equal(video_idx, v_idx) - assert_equal(clip_idx, c_idx) + self.assertEqual(video_idx, v_idx) + self.assertEqual(clip_idx, c_idx) @unittest.skipIf(not io.video._av_available(), "this test requires av") def test_video_clips_custom_fps(self): @@ -90,8 +89,8 @@ def test_video_clips_custom_fps(self): video_clips = VideoClips(video_list, num_frames, num_frames, fps, num_workers=2) for i in range(video_clips.num_clips()): video, audio, info, video_idx = video_clips.get_clip(i) - assert_equal(video.shape[0], num_frames) - assert_equal(info["video_fps"], fps) + self.assertEqual(video.shape[0], num_frames) + self.assertEqual(info["video_fps"], fps) # TODO add tests checking that the content is right def test_compute_clips_for_video(self): @@ -105,8 +104,8 @@ def test_compute_clips_for_video(self): orig_fps, new_fps) resampled_idxs = VideoClips._resample_video_idx(int(duration * new_fps), orig_fps, new_fps) self.assertEqual(len(clips), 1) - assert_equal(clips, idxs) - assert_equal(idxs[0], resampled_idxs) + self.assertTrue(clips.equal(idxs)) + self.assertTrue(idxs[0].equal(resampled_idxs)) # case 2: all frames appear only once num_frames = 4 @@ -117,8 +116,8 @@ def test_compute_clips_for_video(self): orig_fps, new_fps) resampled_idxs = VideoClips._resample_video_idx(int(duration * new_fps), orig_fps, new_fps) self.assertEqual(len(clips), 3) - assert_equal(clips, idxs) - assert_equal(idxs.flatten(), resampled_idxs) + self.assertTrue(clips.equal(idxs)) + self.assertTrue(idxs.flatten().equal(resampled_idxs)) # case 3: frames aren't enough for a clip num_frames = 32 diff --git a/test/test_functional_tensor.py b/test/test_functional_tensor.py index b2be22f9794..31a1c1a43e8 100644 --- a/test/test_functional_tensor.py +++ b/test/test_functional_tensor.py @@ -15,7 +15,6 @@ from torchvision.transforms import InterpolationMode from common_utils import TransformsTester, cpu_and_gpu, needs_cuda -from _assert_utils import assert_equal from typing import Dict, List, Sequence, Tuple @@ -40,13 +39,13 @@ def _test_fn_on_batch(self, batch_tensors, fn, scripted_fn_atol=1e-8, **fn_kwarg for i in range(len(batch_tensors)): img_tensor = batch_tensors[i, ...] 
transformed_img = fn(img_tensor, **fn_kwargs) - assert_equal(transformed_img, transformed_batch[i, ...]) + self.assertTrue(transformed_img.equal(transformed_batch[i, ...])) if scripted_fn_atol >= 0: scripted_fn = torch.jit.script(fn) # scriptable function test s_transformed_batch = scripted_fn(batch_tensors, **fn_kwargs) - torch.testing.assert_close(transformed_batch, s_transformed_batch, rtol=1e-5, atol=scripted_fn_atol) + self.assertTrue(transformed_batch.allclose(s_transformed_batch, atol=scripted_fn_atol)) def test_assert_image_tensor(self): shape = (100,) @@ -80,7 +79,7 @@ def test_vflip(self): # scriptable function test vflipped_img_script = script_vflip(img_tensor) - assert_equal(vflipped_img, vflipped_img_script) + self.assertTrue(vflipped_img.equal(vflipped_img_script)) batch_tensors = self._create_data_batch(16, 18, num_samples=4, device=self.device) self._test_fn_on_batch(batch_tensors, F.vflip) @@ -95,7 +94,7 @@ def test_hflip(self): # scriptable function test hflipped_img_script = script_hflip(img_tensor) - assert_equal(hflipped_img, hflipped_img_script) + self.assertTrue(hflipped_img.equal(hflipped_img_script)) batch_tensors = self._create_data_batch(16, 18, num_samples=4, device=self.device) self._test_fn_on_batch(batch_tensors, F.hflip) @@ -141,10 +140,11 @@ def test_hsv2rgb(self): for h1, s1, v1 in zip(h, s, v): rgb.append(colorsys.hsv_to_rgb(h1, s1, v1)) colorsys_img = torch.tensor(rgb, dtype=torch.float32, device=self.device) - torch.testing.assert_close(ft_img, colorsys_img, rtol=0.0, atol=1e-5) + max_diff = (ft_img - colorsys_img).abs().max() + self.assertLess(max_diff, 1e-5) s_rgb_img = scripted_fn(hsv_img) - torch.testing.assert_close(rgb_img, s_rgb_img) + self.assertTrue(rgb_img.allclose(s_rgb_img)) batch_tensors = self._create_data_batch(120, 100, num_samples=4, device=self.device).float() self._test_fn_on_batch(batch_tensors, F_t._hsv2rgb) @@ -177,7 +177,7 @@ def test_rgb2hsv(self): self.assertLess(max_diff, 1e-5) s_hsv_img = scripted_fn(rgb_img) - torch.testing.assert_close(hsv_img, s_hsv_img, rtol=1e-5, atol=1e-7) + self.assertTrue(hsv_img.allclose(s_hsv_img, atol=1e-7)) batch_tensors = self._create_data_batch(120, 100, num_samples=4, device=self.device).float() self._test_fn_on_batch(batch_tensors, F_t._rgb2hsv) @@ -194,7 +194,7 @@ def test_rgb_to_grayscale(self): self.approxEqualTensorToPIL(gray_tensor.float(), gray_pil_image, tol=1.0 + 1e-10, agg_method="max") s_gray_tensor = script_rgb_to_grayscale(img_tensor, num_output_channels=num_output_channels) - assert_equal(s_gray_tensor, gray_tensor) + self.assertTrue(s_gray_tensor.equal(gray_tensor)) batch_tensors = self._create_data_batch(16, 18, num_samples=4, device=self.device) self._test_fn_on_batch(batch_tensors, F.rgb_to_grayscale, num_output_channels=num_output_channels) @@ -240,12 +240,12 @@ def test_five_crop(self): for j in range(len(tuple_transformed_imgs)): true_transformed_img = tuple_transformed_imgs[j] transformed_img = tuple_transformed_batches[j][i, ...] 
- assert_equal(true_transformed_img, transformed_img) + self.assertTrue(true_transformed_img.equal(transformed_img)) # scriptable function test s_tuple_transformed_batches = script_five_crop(batch_tensors, [10, 11]) for transformed_batch, s_transformed_batch in zip(tuple_transformed_batches, s_tuple_transformed_batches): - assert_equal(transformed_batch, s_transformed_batch) + self.assertTrue(transformed_batch.equal(s_transformed_batch)) def test_ten_crop(self): script_ten_crop = torch.jit.script(F.ten_crop) @@ -272,12 +272,12 @@ def test_ten_crop(self): for j in range(len(tuple_transformed_imgs)): true_transformed_img = tuple_transformed_imgs[j] transformed_img = tuple_transformed_batches[j][i, ...] - assert_equal(true_transformed_img, transformed_img) + self.assertTrue(true_transformed_img.equal(transformed_img)) # scriptable function test s_tuple_transformed_batches = script_ten_crop(batch_tensors, [10, 11]) for transformed_batch, s_transformed_batch in zip(tuple_transformed_batches, s_tuple_transformed_batches): - assert_equal(transformed_batch, s_transformed_batch) + self.assertTrue(transformed_batch.equal(s_transformed_batch)) def test_pad(self): script_fn = torch.jit.script(F.pad) @@ -320,7 +320,7 @@ def test_pad(self): else: script_pad = pad pad_tensor_script = script_fn(tensor, script_pad, **kwargs) - assert_equal(pad_tensor, pad_tensor_script, msg="{}, {}".format(pad, kwargs)) + self.assertTrue(pad_tensor.equal(pad_tensor_script), msg="{}, {}".format(pad, kwargs)) self._test_fn_on_batch(batch_tensors, F.pad, padding=script_pad, **kwargs) @@ -348,10 +348,9 @@ def test_resize(self): resized_tensor = F.resize(tensor, size=size, interpolation=interpolation, max_size=max_size) resized_pil_img = F.resize(pil_img, size=size, interpolation=interpolation, max_size=max_size) - assert_equal( - resized_tensor.size()[1:], - resized_pil_img.size[::-1], - msg="{}, {}".format(size, interpolation), + self.assertEqual( + resized_tensor.size()[1:], resized_pil_img.size[::-1], + msg="{}, {}".format(size, interpolation) ) if interpolation not in [NEAREST, ]: @@ -375,7 +374,7 @@ def test_resize(self): resize_result = script_fn(tensor, size=script_size, interpolation=interpolation, max_size=max_size) - assert_equal(resized_tensor, resize_result, msg="{}, {}".format(size, interpolation)) + self.assertTrue(resized_tensor.equal(resize_result), msg="{}, {}".format(size, interpolation)) self._test_fn_on_batch( batch_tensors, F.resize, size=script_size, interpolation=interpolation, max_size=max_size @@ -385,7 +384,7 @@ def test_resize(self): with self.assertWarnsRegex(UserWarning, r"Argument interpolation should be of type InterpolationMode"): res1 = F.resize(tensor, size=32, interpolation=2) res2 = F.resize(tensor, size=32, interpolation=BILINEAR) - assert_equal(res1, res2) + self.assertTrue(res1.equal(res2)) for img in (tensor, pil_img): exp_msg = "max_size should only be passed if size specifies the length of the smaller edge" @@ -401,17 +400,15 @@ def test_resized_crop(self): for mode in [NEAREST, BILINEAR, BICUBIC]: out_tensor = F.resized_crop(tensor, top=0, left=0, height=26, width=36, size=[26, 36], interpolation=mode) - assert_equal(tensor, out_tensor, msg="{} vs {}".format(out_tensor[0, :5, :5], tensor[0, :5, :5])) + self.assertTrue(tensor.equal(out_tensor), msg="{} vs {}".format(out_tensor[0, :5, :5], tensor[0, :5, :5])) # 2) resize by half and crop a TL corner tensor, _ = self._create_data(26, 36, device=self.device) out_tensor = F.resized_crop(tensor, top=0, left=0, height=20, width=30, size=[10, 
15], interpolation=NEAREST) expected_out_tensor = tensor[:, :20:2, :30:2] - assert_equal( - expected_out_tensor, - out_tensor, - check_stride=False, - msg="{} vs {}".format(expected_out_tensor[0, :10, :10], out_tensor[0, :10, :10]), + self.assertTrue( + expected_out_tensor.equal(out_tensor), + msg="{} vs {}".format(expected_out_tensor[0, :10, :10], out_tensor[0, :10, :10]) ) batch_tensors = self._create_data_batch(26, 36, num_samples=4, device=self.device) @@ -423,11 +420,15 @@ def _test_affine_identity_map(self, tensor, scripted_affine): # 1) identity map out_tensor = F.affine(tensor, angle=0, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], interpolation=NEAREST) - assert_equal(tensor, out_tensor, msg="{} vs {}".format(out_tensor[0, :5, :5], tensor[0, :5, :5])) + self.assertTrue( + tensor.equal(out_tensor), msg="{} vs {}".format(out_tensor[0, :5, :5], tensor[0, :5, :5]) + ) out_tensor = scripted_affine( tensor, angle=0, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], interpolation=NEAREST ) - assert_equal(tensor, out_tensor, msg="{} vs {}".format(out_tensor[0, :5, :5], tensor[0, :5, :5])) + self.assertTrue( + tensor.equal(out_tensor), msg="{} vs {}".format(out_tensor[0, :5, :5], tensor[0, :5, :5]) + ) def _test_affine_square_rotations(self, tensor, pil_img, scripted_affine): # 2) Test rotation @@ -451,11 +452,9 @@ def _test_affine_square_rotations(self, tensor, pil_img, scripted_affine): tensor, angle=a, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], interpolation=NEAREST ) if true_tensor is not None: - assert_equal( - true_tensor, - out_tensor, - msg="{}\n{} vs \n{}".format(a, out_tensor[0, :5, :5], true_tensor[0, :5, :5]), - check_stride=False, + self.assertTrue( + true_tensor.equal(out_tensor), + msg="{}\n{} vs \n{}".format(a, out_tensor[0, :5, :5], true_tensor[0, :5, :5]) ) if out_tensor.dtype != torch.uint8: @@ -594,18 +593,18 @@ def test_affine(self): with self.assertWarnsRegex(UserWarning, r"Argument resample is deprecated and will be removed"): res1 = F.affine(tensor, 45, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], resample=2) res2 = F.affine(tensor, 45, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], interpolation=BILINEAR) - assert_equal(res1, res2) + self.assertTrue(res1.equal(res2)) # assert changed type warning with self.assertWarnsRegex(UserWarning, r"Argument interpolation should be of type InterpolationMode"): res1 = F.affine(tensor, 45, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], interpolation=2) res2 = F.affine(tensor, 45, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], interpolation=BILINEAR) - assert_equal(res1, res2) + self.assertTrue(res1.equal(res2)) with self.assertWarnsRegex(UserWarning, r"Argument fillcolor is deprecated and will be removed"): res1 = F.affine(pil_img, 45, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], fillcolor=10) res2 = F.affine(pil_img, 45, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], fill=10) - assert res1 == res2 + self.assertEqual(res1, res2) def _test_rotate_all_options(self, tensor, pil_img, scripted_rotate, centers): img_size = pil_img.size @@ -683,13 +682,13 @@ def test_rotate(self): with self.assertWarnsRegex(UserWarning, r"Argument resample is deprecated and will be removed"): res1 = F.rotate(tensor, 45, resample=2) res2 = F.rotate(tensor, 45, interpolation=BILINEAR) - assert_equal(res1, res2) + self.assertTrue(res1.equal(res2)) # assert changed type warning with self.assertWarnsRegex(UserWarning, r"Argument interpolation should be of type InterpolationMode"): res1 = F.rotate(tensor, 45, interpolation=2) res2 = F.rotate(tensor, 
45, interpolation=BILINEAR) - assert_equal(res1, res2) + self.assertTrue(res1.equal(res2)) def test_gaussian_blur(self): small_image_tensor = torch.from_numpy( @@ -748,8 +747,10 @@ def test_gaussian_blur(self): for fn in [F.gaussian_blur, scripted_transform]: out = fn(tensor, kernel_size=ksize, sigma=sigma) - torch.testing.assert_close( - out, true_out, rtol=0.0, atol=1.0, check_stride=False, + self.assertEqual(true_out.shape, out.shape, msg="{}, {}".format(ksize, sigma)) + self.assertLessEqual( + torch.max(true_out.float() - out.float()), + 1.0, msg="{}, {}".format(ksize, sigma) ) @@ -770,7 +771,7 @@ def test_scale_channel(self): img_chan = torch.randint(0, 256, size=size).to('cpu') scaled_cpu = F_t._scale_channel(img_chan) scaled_cuda = F_t._scale_channel(img_chan.to('cuda')) - assert_equal(scaled_cpu, scaled_cuda.to('cpu')) + self.assertTrue(scaled_cpu.equal(scaled_cuda.to('cpu'))) def _get_data_dims_and_points_for_perspective(): diff --git a/test/test_image.py b/test/test_image.py index ebc47fde9e4..4ed001c61f7 100644 --- a/test/test_image.py +++ b/test/test_image.py @@ -8,7 +8,6 @@ import torch from PIL import Image from common_utils import get_tmp_dir, needs_cuda -from _assert_utils import assert_equal from torchvision.io.image import ( decode_png, decode_jpeg, encode_jpeg, write_jpeg, decode_image, read_file, @@ -108,7 +107,7 @@ def test_encode_jpeg(self): for src_img in [img, img.contiguous()]: # PIL sets jpeg quality to 75 by default jpeg_bytes = encode_jpeg(src_img, quality=75) - assert_equal(jpeg_bytes, pil_bytes) + self.assertTrue(jpeg_bytes.equal(pil_bytes)) with self.assertRaisesRegex( RuntimeError, "Input tensor dtype should be uint8"): @@ -192,7 +191,7 @@ def test_encode_png(self): rec_img = torch.from_numpy(np.array(rec_img)) rec_img = rec_img.permute(2, 0, 1) - assert_equal(img_pil, rec_img) + self.assertTrue(img_pil.equal(rec_img)) with self.assertRaisesRegex( RuntimeError, "Input tensor dtype should be uint8"): @@ -225,7 +224,7 @@ def test_write_png(self): saved_image = torch.from_numpy(np.array(Image.open(torch_png))) saved_image = saved_image.permute(2, 0, 1) - assert_equal(img_pil, saved_image) + self.assertTrue(img_pil.equal(saved_image)) def test_read_file(self): with get_tmp_dir() as d: @@ -236,7 +235,7 @@ def test_read_file(self): data = read_file(fpath) expected = torch.tensor(list(content), dtype=torch.uint8) - assert_equal(data, expected) + self.assertTrue(data.equal(expected)) os.unlink(fpath) with self.assertRaisesRegex( @@ -252,7 +251,7 @@ def test_read_file_non_ascii(self): data = read_file(fpath) expected = torch.tensor(list(content), dtype=torch.uint8) - assert_equal(data, expected) + self.assertTrue(data.equal(expected)) os.unlink(fpath) def test_write_file(self): diff --git a/test/test_io.py b/test/test_io.py index e86ea9e84fc..7d752bdbcf7 100644 --- a/test/test_io.py +++ b/test/test_io.py @@ -10,7 +10,6 @@ from urllib.error import URLError from common_utils import get_tmp_dir -from _assert_utils import assert_equal try: @@ -75,7 +74,7 @@ class TestIO(unittest.TestCase): def test_write_read_video(self): with temp_video(10, 300, 300, 5, lossless=True) as (f_name, data): lv, _, info = io.read_video(f_name) - assert_equal(data, lv) + self.assertTrue(data.equal(lv)) self.assertEqual(info["video_fps"], 5) @unittest.skipIf(not io._HAS_VIDEO_OPT, "video_reader backend is not chosen") @@ -117,14 +116,14 @@ def test_read_partial_video(self): lv, _, _ = io.read_video(f_name, pts[start], pts[start + offset - 1]) s_data = data[start:(start + offset)] 
self.assertEqual(len(lv), offset) - assert_equal(s_data, lv) + self.assertTrue(s_data.equal(lv)) if get_video_backend() == "pyav": # for "video_reader" backend, we don't decode the closest early frame # when the given start pts is not matching any frame pts lv, _, _ = io.read_video(f_name, pts[4] + 1, pts[7]) self.assertEqual(len(lv), 4) - assert_equal(data[4:8], lv) + self.assertTrue(data[4:8].equal(lv)) def test_read_partial_video_bframes(self): # do not use lossless encoding, to test the presence of B-frames @@ -136,16 +135,16 @@ def test_read_partial_video_bframes(self): lv, _, _ = io.read_video(f_name, pts[start], pts[start + offset - 1]) s_data = data[start:(start + offset)] self.assertEqual(len(lv), offset) - assert_equal(s_data, lv, rtol=0.0, atol=self.TOLERANCE) + self.assertTrue((s_data.float() - lv.float()).abs().max() < self.TOLERANCE) lv, _, _ = io.read_video(f_name, pts[4] + 1, pts[7]) # TODO fix this if get_video_backend() == 'pyav': self.assertEqual(len(lv), 4) - assert_equal(data[4:8], lv, rtol=0.0, atol=self.TOLERANCE) + self.assertTrue((data[4:8].float() - lv.float()).abs().max() < self.TOLERANCE) else: self.assertEqual(len(lv), 3) - assert_equal(data[5:8], lv, rtol=0.0, atol=self.TOLERANCE) + self.assertTrue((data[5:8].float() - lv.float()).abs().max() < self.TOLERANCE) def test_read_packed_b_frames_divx_file(self): name = "hmdb51_Turnk_r_Pippi_Michel_cartwheel_f_cm_np2_le_med_6.avi" @@ -176,7 +175,7 @@ def test_read_video_pts_unit_sec(self): with temp_video(10, 300, 300, 5, lossless=True) as (f_name, data): lv, _, info = io.read_video(f_name, pts_unit='sec') - assert_equal(data, lv) + self.assertTrue(data.equal(lv)) self.assertEqual(info["video_fps"], 5) self.assertEqual(info, {"video_fps": 5}) @@ -202,7 +201,7 @@ def test_read_partial_video_pts_unit_sec(self): lv, _, _ = io.read_video(f_name, pts[start], pts[start + offset - 1], pts_unit='sec') s_data = data[start:(start + offset)] self.assertEqual(len(lv), offset) - assert_equal(s_data, lv) + self.assertTrue(s_data.equal(lv)) container = av.open(f_name) stream = container.streams[0] @@ -213,7 +212,7 @@ def test_read_partial_video_pts_unit_sec(self): # for "video_reader" backend, we don't decode the closest early frame # when the given start pts is not matching any frame pts self.assertEqual(len(lv), 4) - assert_equal(data[4:8], lv) + self.assertTrue(data[4:8].equal(lv)) container.close() def test_read_video_corrupted_file(self): @@ -252,10 +251,9 @@ def test_read_video_partially_corrupted_file(self): else: self.assertEqual(len(video), 4) # but the valid decoded content is still correct - assert_equal(video[:3], data[:3]) + self.assertTrue(video[:3].equal(data[:3])) # and the last few frames are wrong - with self.assertRaises(AssertionError): - assert_equal(video, data) + self.assertFalse(video.equal(data)) @unittest.skipIf(sys.platform == 'win32', 'temporarily disabled on Windows') def test_write_video_with_audio(self): @@ -280,7 +278,7 @@ def test_write_video_with_audio(self): ) self.assertEqual(info["video_fps"], out_info["video_fps"]) - assert_equal(video_tensor, out_video_tensor) + self.assertTrue(video_tensor.equal(out_video_tensor)) audio_stream = av.open(f_name).streams.audio[0] out_audio_stream = av.open(out_f_name).streams.audio[0] diff --git a/test/test_models.py b/test/test_models.py index a86392cf8b6..401c4175ccf 100644 --- a/test/test_models.py +++ b/test/test_models.py @@ -120,7 +120,7 @@ def check_out(out): # predictions match. 
expected_file = self._get_expected_file(name) expected = torch.load(expected_file) - torch.testing.assert_close(out.argmax(dim=1), expected.argmax(dim=1), rtol=prec, atol=prec) + self.assertEqual(out.argmax(dim=1), expected.argmax(dim=1), prec=prec) return False # Partial validation performed return True # Full validation performed @@ -205,7 +205,7 @@ def compute_mean_std(tensor): # scores. expected_file = self._get_expected_file(name) expected = torch.load(expected_file) - torch.testing.assert_close(output[0]["scores"], expected[0]["scores"], rtol=prec, atol=prec) + self.assertEqual(output[0]["scores"], expected[0]["scores"], prec=prec) # Note: Fmassa proposed turning off NMS by adapting the threshold # and then using the Hungarian algorithm as in DETR to find the @@ -301,8 +301,10 @@ def test_memory_efficient_densenet(self): model2.eval() out2 = model2(x) + max_diff = (out1 - out2).abs().max() + self.assertTrue(num_params == num_grad) - torch.testing.assert_close(out1, out2, rtol=0.0, atol=1e-5) + self.assertTrue(max_diff < 1e-5) def test_resnet_dilation(self): # TODO improve tests to also check that each layer has the right dimensionality diff --git a/test/test_models_detection_anchor_utils.py b/test/test_models_detection_anchor_utils.py index 13c399a0c32..ed1b06b5f96 100644 --- a/test/test_models_detection_anchor_utils.py +++ b/test/test_models_detection_anchor_utils.py @@ -1,6 +1,5 @@ import torch from common_utils import TestCase -from _assert_utils import assert_equal from torchvision.models.detection.anchor_utils import AnchorGenerator, DefaultBoxGenerator from torchvision.models.detection.image_list import ImageList @@ -63,8 +62,8 @@ def test_anchor_generator(self): self.assertEqual(len(anchors), 2) self.assertEqual(tuple(anchors[0].shape), (9, 4)) self.assertEqual(tuple(anchors[1].shape), (9, 4)) - assert_equal(anchors[0], anchors_output) - assert_equal(anchors[1], anchors_output) + self.assertEqual(anchors[0], anchors_output) + self.assertEqual(anchors[1], anchors_output) def test_defaultbox_generator(self): images = torch.zeros(2, 3, 15, 15) @@ -86,5 +85,5 @@ def test_defaultbox_generator(self): self.assertEqual(len(dboxes), 2) self.assertEqual(tuple(dboxes[0].shape), (4, 4)) self.assertEqual(tuple(dboxes[1].shape), (4, 4)) - torch.testing.assert_close(dboxes[0], dboxes_output, rtol=1e-5, atol=1e-8) - torch.testing.assert_close(dboxes[1], dboxes_output, rtol=1e-5, atol=1e-8) + self.assertTrue(dboxes[0].allclose(dboxes_output)) + self.assertTrue(dboxes[1].allclose(dboxes_output)) diff --git a/test/test_models_detection_utils.py b/test/test_models_detection_utils.py index a20e0abc965..f61d825e0d8 100644 --- a/test/test_models_detection_utils.py +++ b/test/test_models_detection_utils.py @@ -4,7 +4,6 @@ from torchvision.models.detection.transform import GeneralizedRCNNTransform import unittest from torchvision.models.detection import backbone_utils -from _assert_utils import assert_equal class Tester(unittest.TestCase): @@ -56,8 +55,8 @@ def test_transform_copy_targets(self): targets = [{'boxes': torch.rand(3, 4)}, {'boxes': torch.rand(2, 4)}] targets_copy = copy.deepcopy(targets) out = transform(image, targets) # noqa: F841 - assert_equal(targets[0]['boxes'], targets_copy[0]['boxes']) - assert_equal(targets[1]['boxes'], targets_copy[1]['boxes']) + self.assertTrue(torch.equal(targets[0]['boxes'], targets_copy[0]['boxes'])) + self.assertTrue(torch.equal(targets[1]['boxes'], targets_copy[1]['boxes'])) def test_not_float_normalize(self): transform = GeneralizedRCNNTransform(300, 500, 
torch.zeros(3), torch.ones(3)) diff --git a/test/test_onnx.py b/test/test_onnx.py index d0140c79dfc..63f182004b8 100644 --- a/test/test_onnx.py +++ b/test/test_onnx.py @@ -7,7 +7,6 @@ onnxruntime = None from common_utils import set_rng_seed -from _assert_utils import assert_equal import io import torch from torchvision import ops @@ -484,8 +483,8 @@ def test_heatmaps_to_keypoints(self): jit_trace = torch.jit.trace(heatmaps_to_keypoints, (maps, rois)) out_trace = jit_trace(maps, rois) - assert_equal(out[0], out_trace[0]) - assert_equal(out[1], out_trace[1]) + assert torch.all(out[0].eq(out_trace[0])) + assert torch.all(out[1].eq(out_trace[1])) maps2 = torch.rand(20, 2, 21, 21) rois2 = torch.rand(20, 4) @@ -493,8 +492,8 @@ def test_heatmaps_to_keypoints(self): out2 = heatmaps_to_keypoints(maps2, rois2) out_trace2 = jit_trace(maps2, rois2) - assert_equal(out2[0], out_trace2[0]) - assert_equal(out2[1], out_trace2[1]) + assert torch.all(out2[0].eq(out_trace2[0])) + assert torch.all(out2[1].eq(out_trace2[1])) def test_keypoint_rcnn(self): images, test_images = self.get_test_images() diff --git a/test/test_ops.py b/test/test_ops.py index 88bcf7f540f..2e9fac8bc42 100644 --- a/test/test_ops.py +++ b/test/test_ops.py @@ -1,5 +1,4 @@ from common_utils import needs_cuda, cpu_only -from _assert_utils import assert_equal import math import unittest import pytest @@ -79,8 +78,7 @@ def _test_forward(self, device, contiguous, x_dtype=None, rois_dtype=None, **kwa sampling_ratio=-1, device=device, dtype=self.dtype, **kwargs) tol = 1e-3 if (x_dtype is torch.half or rois_dtype is torch.half) else 1e-5 - torch.testing.assert_close(gt_y.to(y), y, rtol=tol, atol=tol) - # self.assertTrue(torch.allclose(gt_y.to(y.dtype), y, )) + self.assertTrue(torch.allclose(gt_y.to(y.dtype), y, rtol=tol, atol=tol)) def _test_backward(self, device, contiguous): pool_size = 2 @@ -365,7 +363,7 @@ def make_rois(num_rois=1000): abs_diff = torch.abs(qy[diff_idx].dequantize() - quantized_float_y[diff_idx].dequantize()) t_scale = torch.full_like(abs_diff, fill_value=scale) - torch.testing.assert_close(abs_diff, t_scale, rtol=1e-5, atol=1e-5) + self.assertTrue(torch.allclose(abs_diff, t_scale, atol=1e-5)) x = torch.randint(50, 100, size=(2, 3, 10, 10)).to(dtype) qx = torch.quantize_per_tensor(x, scale=1, zero_point=0, dtype=torch.qint8) @@ -557,7 +555,7 @@ def test_nms_cuda_float16(self): iou_thres = 0.2 keep32 = ops.nms(boxes, scores, iou_thres) keep16 = ops.nms(boxes.to(torch.float16), scores.to(torch.float16), iou_thres) - assert_equal(keep32, keep16) + assert torch.all(torch.eq(keep32, keep16)) @cpu_only def test_batched_nms_implementations(self): @@ -575,13 +573,12 @@ def test_batched_nms_implementations(self): keep_vanilla = ops.boxes._batched_nms_vanilla(boxes, scores, idxs, iou_threshold) keep_trick = ops.boxes._batched_nms_coordinate_trick(boxes, scores, idxs, iou_threshold) - torch.testing.assert_close( - keep_vanilla, keep_trick, msg="The vanilla and the trick implementation yield different nms outputs." - ) + err_msg = "The vanilla and the trick implementation yield different nms outputs." 
+ assert torch.allclose(keep_vanilla, keep_trick), err_msg # Also make sure an empty tensor is returned if boxes is empty empty = torch.empty((0,), dtype=torch.int64) - torch.testing.assert_close(empty, ops.batched_nms(empty, None, None, None)) + assert torch.allclose(empty, ops.batched_nms(empty, None, None, None)) class DeformConvTester(OpTester, unittest.TestCase): @@ -693,17 +690,15 @@ def _test_forward_with_batchsize(self, device, contiguous, batch_sz, dtype): bias = layer.bias.data expected = self.expected_fn(x, weight, offset, mask, bias, stride=stride, padding=padding, dilation=dilation) - torch.testing.assert_close( - res.to(expected), expected, rtol=tol, atol=tol, msg='\nres:\n{}\nexpected:\n{}'.format(res, expected) - ) + self.assertTrue(torch.allclose(res.to(expected.dtype), expected, rtol=tol, atol=tol), + '\nres:\n{}\nexpected:\n{}'.format(res, expected)) # no modulation test res = layer(x, offset) expected = self.expected_fn(x, weight, offset, None, bias, stride=stride, padding=padding, dilation=dilation) - torch.testing.assert_close( - res.to(expected), expected, rtol=tol, atol=tol, msg='\nres:\n{}\nexpected:\n{}'.format(res, expected) - ) + self.assertTrue(torch.allclose(res.to(expected.dtype), expected, rtol=tol, atol=tol), + '\nres:\n{}\nexpected:\n{}'.format(res, expected)) # test for wrong sizes with self.assertRaises(RuntimeError): @@ -783,7 +778,7 @@ def test_compare_cpu_cuda_grads(self): else: self.assertTrue(init_weight.grad is not None) res_grads = init_weight.grad.to("cpu") - torch.testing.assert_close(true_cpu_grads, res_grads) + self.assertTrue(true_cpu_grads.allclose(res_grads)) @unittest.skipIf(not torch.cuda.is_available(), "CUDA unavailable") def test_autocast(self): @@ -817,14 +812,14 @@ def test_frozenbatchnorm2d_eps(self): bn = torch.nn.BatchNorm2d(sample_size[1]).eval() bn.load_state_dict(state_dict) # Difference is expected to fall in an acceptable range - torch.testing.assert_close(fbn(x), bn(x), rtol=1e-5, atol=1e-6) + self.assertTrue(torch.allclose(fbn(x), bn(x), atol=1e-6)) # Check computation for eps > 0 fbn = ops.misc.FrozenBatchNorm2d(sample_size[1], eps=1e-5) fbn.load_state_dict(state_dict, strict=False) bn = torch.nn.BatchNorm2d(sample_size[1], eps=1e-5).eval() bn.load_state_dict(state_dict) - torch.testing.assert_close(fbn(x), bn(x), rtol=1e-5, atol=1e-6) + self.assertTrue(torch.allclose(fbn(x), bn(x), atol=1e-6)) def test_frozenbatchnorm2d_n_arg(self): """Ensure a warning is thrown when passing `n` kwarg @@ -865,11 +860,20 @@ def test_bbox_same(self): exp_xyxy = torch.tensor([[0, 0, 100, 100], [0, 0, 0, 0], [10, 15, 30, 35], [23, 35, 93, 95]], dtype=torch.float) - assert_equal(ops.box_convert(box_tensor, in_fmt="xyxy", out_fmt="xyxy"), exp_xyxy) + box_same = ops.box_convert(box_tensor, in_fmt="xyxy", out_fmt="xyxy") + self.assertEqual(exp_xyxy.size(), torch.Size([4, 4])) + self.assertEqual(exp_xyxy.dtype, box_tensor.dtype) + assert torch.all(torch.eq(box_same, exp_xyxy)).item() - assert_equal(ops.box_convert(box_tensor, in_fmt="xywh", out_fmt="xywh"), exp_xyxy) + box_same = ops.box_convert(box_tensor, in_fmt="xywh", out_fmt="xywh") + self.assertEqual(exp_xyxy.size(), torch.Size([4, 4])) + self.assertEqual(exp_xyxy.dtype, box_tensor.dtype) + assert torch.all(torch.eq(box_same, exp_xyxy)).item() - assert_equal(ops.box_convert(box_tensor, in_fmt="cxcywh", out_fmt="cxcywh"), exp_xyxy) + box_same = ops.box_convert(box_tensor, in_fmt="cxcywh", out_fmt="cxcywh") + self.assertEqual(exp_xyxy.size(), torch.Size([4, 4])) + 
self.assertEqual(exp_xyxy.dtype, box_tensor.dtype) + assert torch.all(torch.eq(box_same, exp_xyxy)).item() def test_bbox_xyxy_xywh(self): # Simple test convert boxes to xywh and back. Make sure they are same. @@ -880,11 +884,15 @@ def test_bbox_xyxy_xywh(self): [10, 15, 20, 20], [23, 35, 70, 60]], dtype=torch.float) box_xywh = ops.box_convert(box_tensor, in_fmt="xyxy", out_fmt="xywh") - assert_equal(box_xywh, exp_xywh) + self.assertEqual(exp_xywh.size(), torch.Size([4, 4])) + self.assertEqual(exp_xywh.dtype, box_tensor.dtype) + assert torch.all(torch.eq(box_xywh, exp_xywh)).item() # Reverse conversion box_xyxy = ops.box_convert(box_xywh, in_fmt="xywh", out_fmt="xyxy") - assert_equal(box_xyxy, box_tensor) + self.assertEqual(box_xyxy.size(), torch.Size([4, 4])) + self.assertEqual(box_xyxy.dtype, box_tensor.dtype) + assert torch.all(torch.eq(box_xyxy, box_tensor)).item() def test_bbox_xyxy_cxcywh(self): # Simple test convert boxes to xywh and back. Make sure they are same. @@ -895,11 +903,15 @@ def test_bbox_xyxy_cxcywh(self): [20, 25, 20, 20], [58, 65, 70, 60]], dtype=torch.float) box_cxcywh = ops.box_convert(box_tensor, in_fmt="xyxy", out_fmt="cxcywh") - assert_equal(box_cxcywh, exp_cxcywh) + self.assertEqual(exp_cxcywh.size(), torch.Size([4, 4])) + self.assertEqual(exp_cxcywh.dtype, box_tensor.dtype) + assert torch.all(torch.eq(box_cxcywh, exp_cxcywh)).item() # Reverse conversion box_xyxy = ops.box_convert(box_cxcywh, in_fmt="cxcywh", out_fmt="xyxy") - assert_equal(box_xyxy, box_tensor) + self.assertEqual(box_xyxy.size(), torch.Size([4, 4])) + self.assertEqual(box_xyxy.dtype, box_tensor.dtype) + assert torch.all(torch.eq(box_xyxy, box_tensor)).item() def test_bbox_xywh_cxcywh(self): box_tensor = torch.tensor([[0, 0, 100, 100], [0, 0, 0, 0], @@ -910,11 +922,15 @@ def test_bbox_xywh_cxcywh(self): [20, 25, 20, 20], [58, 65, 70, 60]], dtype=torch.float) box_cxcywh = ops.box_convert(box_tensor, in_fmt="xywh", out_fmt="cxcywh") - assert_equal(box_cxcywh, exp_cxcywh) + self.assertEqual(exp_cxcywh.size(), torch.Size([4, 4])) + self.assertEqual(exp_cxcywh.dtype, box_tensor.dtype) + assert torch.all(torch.eq(box_cxcywh, exp_cxcywh)).item() # Reverse conversion box_xywh = ops.box_convert(box_cxcywh, in_fmt="cxcywh", out_fmt="xywh") - assert_equal(box_xywh, box_tensor) + self.assertEqual(box_xywh.size(), torch.Size([4, 4])) + self.assertEqual(box_xywh.dtype, box_tensor.dtype) + assert torch.all(torch.eq(box_xywh, box_tensor)).item() def test_bbox_invalid(self): box_tensor = torch.tensor([[0, 0, 100, 100], [0, 0, 0, 0], @@ -935,18 +951,19 @@ def test_bbox_convert_jit(self): box_xywh = ops.box_convert(box_tensor, in_fmt="xyxy", out_fmt="xywh") scripted_xywh = scripted_fn(box_tensor, 'xyxy', 'xywh') - torch.testing.assert_close(scripted_xywh, box_xywh, rtol=0.0, atol=TOLERANCE) + self.assertTrue((scripted_xywh - box_xywh).abs().max() < TOLERANCE) box_cxcywh = ops.box_convert(box_tensor, in_fmt="xyxy", out_fmt="cxcywh") scripted_cxcywh = scripted_fn(box_tensor, 'xyxy', 'cxcywh') - torch.testing.assert_close(scripted_cxcywh, box_cxcywh, rtol=0.0, atol=TOLERANCE) + self.assertTrue((scripted_cxcywh - box_cxcywh).abs().max() < TOLERANCE) class BoxAreaTester(unittest.TestCase): def test_box_area(self): def area_check(box, expected, tolerance=1e-4): out = ops.box_area(box) - torch.testing.assert_close(out, expected, rtol=0.0, check_dtype=False, atol=tolerance) + assert out.size() == expected.size() + assert ((out - expected).abs().max() < tolerance).item() # Check for int boxes for dtype in [torch.int8, 
torch.int16, torch.int32, torch.int64]: @@ -974,7 +991,8 @@ class BoxIouTester(unittest.TestCase): def test_iou(self): def iou_check(box, expected, tolerance=1e-4): out = ops.box_iou(box, box) - torch.testing.assert_close(out, expected, rtol=0.0, check_dtype=False, atol=tolerance) + assert out.size() == expected.size() + assert ((out - expected).abs().max() < tolerance).item() # Check for int boxes for dtype in [torch.int16, torch.int32, torch.int64]: @@ -995,7 +1013,8 @@ class GenBoxIouTester(unittest.TestCase): def test_gen_iou(self): def gen_iou_check(box, expected, tolerance=1e-4): out = ops.generalized_box_iou(box, box) - torch.testing.assert_close(out, expected, rtol=0.0, check_dtype=False, atol=tolerance) + assert out.size() == expected.size() + assert ((out - expected).abs().max() < tolerance).item() # Check for int boxes for dtype in [torch.int16, torch.int32, torch.int64]: diff --git a/test/test_transforms.py b/test/test_transforms.py index 255cca8eae0..9402a37bc35 100644 --- a/test/test_transforms.py +++ b/test/test_transforms.py @@ -22,7 +22,6 @@ stats = None from common_utils import cycle_over, int_dtypes, float_dtypes -from _assert_utils import assert_equal GRACE_HOPPER = get_file_path_2( @@ -103,10 +102,8 @@ def test_center_crop_2(self): "image_size: {} crop_size: {}".format(input_image_size, crop_size)) # Ensure output for PIL and Tensor are equal - assert_equal( - output_tensor, output_pil, check_stride=False, - msg="image_size: {} crop_size: {}".format(input_image_size, crop_size) - ) + self.assertEqual((output_tensor - output_pil).sum(), 0, + "image_size: {} crop_size: {}".format(input_image_size, crop_size)) # Check if content in center of both image and cropped output is same. center_size = (min(crop_size[0], input_image_size[0]), min(crop_size[1], input_image_size[1])) @@ -129,10 +126,8 @@ def test_center_crop_2(self): input_center_tl[1]:input_center_tl[1] + center_size[1] ] - assert_equal( - output_center, img_center, check_stride=False, - msg="image_size: {} crop_size: {}".format(input_image_size, crop_size) - ) + self.assertEqual((output_center - img_center).sum(), 0, + "image_size: {} crop_size: {}".format(input_image_size, crop_size)) def test_five_crop(self): to_pil_image = transforms.ToPILImage() @@ -387,7 +382,7 @@ def test_random_crop(self): ])(img) self.assertEqual(result.size(1), height) self.assertEqual(result.size(2), width) - torch.testing.assert_close(result, img) + self.assertTrue(np.allclose(img.numpy(), result.numpy())) result = transforms.Compose([ transforms.ToPILImage(), @@ -419,14 +414,8 @@ def test_pad(self): # to the pad value fill_v = fill / 255 eps = 1e-5 - h_padded = result[:, :padding, :] - w_padded = result[:, :, :padding] - torch.testing.assert_close( - h_padded, torch.full_like(h_padded, fill_value=fill_v), check_stride=False, rtol=0.0, atol=eps - ) - torch.testing.assert_close( - w_padded, torch.full_like(w_padded, fill_value=fill_v), check_stride=False, rtol=0.0, atol=eps - ) + self.assertTrue((result[:, :padding, :] - fill_v).abs().max() < eps) + self.assertTrue((result[:, :, :padding] - fill_v).abs().max() < eps) self.assertRaises(ValueError, transforms.Pad(padding, fill=(1, 2)), transforms.ToPILImage()(img)) @@ -459,7 +448,7 @@ def test_pad_with_non_constant_padding_modes(self): # First 6 elements of leftmost edge in the middle of the image, values are in order: # edge_pad, edge_pad, edge_pad, constant_pad, constant value added to leftmost edge, 0 edge_middle_slice = np.asarray(edge_padded_img).transpose(2, 0, 1)[0][17][:6] - 
assert_equal(edge_middle_slice, np.asarray([200, 200, 200, 200, 1, 0]), check_dtype=False, check_stride=False) + self.assertTrue(np.all(edge_middle_slice == np.asarray([200, 200, 200, 200, 1, 0]))) self.assertEqual(transforms.ToTensor()(edge_padded_img).size(), (3, 35, 35)) # Pad 3 to left/right, 2 to top/bottom @@ -467,7 +456,7 @@ def test_pad_with_non_constant_padding_modes(self): # First 6 elements of leftmost edge in the middle of the image, values are in order: # reflect_pad, reflect_pad, reflect_pad, constant_pad, constant value added to leftmost edge, 0 reflect_middle_slice = np.asarray(reflect_padded_img).transpose(2, 0, 1)[0][17][:6] - assert_equal(reflect_middle_slice, np.asarray([0, 0, 1, 200, 1, 0]), check_dtype=False, check_stride=False) + self.assertTrue(np.all(reflect_middle_slice == np.asarray([0, 0, 1, 200, 1, 0]))) self.assertEqual(transforms.ToTensor()(reflect_padded_img).size(), (3, 33, 35)) # Pad 3 to left, 2 to top, 2 to right, 1 to bottom @@ -475,7 +464,7 @@ def test_pad_with_non_constant_padding_modes(self): # First 6 elements of leftmost edge in the middle of the image, values are in order: # sym_pad, sym_pad, sym_pad, constant_pad, constant value added to leftmost edge, 0 symmetric_middle_slice = np.asarray(symmetric_padded_img).transpose(2, 0, 1)[0][17][:6] - assert_equal(symmetric_middle_slice, np.asarray([0, 1, 200, 200, 1, 0]), check_dtype=False, check_stride=False) + self.assertTrue(np.all(symmetric_middle_slice == np.asarray([0, 1, 200, 200, 1, 0]))) self.assertEqual(transforms.ToTensor()(symmetric_padded_img).size(), (3, 32, 34)) # Check negative padding explicitly for symmetric case, since it is not @@ -484,8 +473,8 @@ def test_pad_with_non_constant_padding_modes(self): symmetric_padded_img_neg = F.pad(img, (-1, 2, 3, -3), padding_mode='symmetric') symmetric_neg_middle_left = np.asarray(symmetric_padded_img_neg).transpose(2, 0, 1)[0][17][:3] symmetric_neg_middle_right = np.asarray(symmetric_padded_img_neg).transpose(2, 0, 1)[0][17][-4:] - assert_equal(symmetric_neg_middle_left, np.asarray([1, 0, 0]), check_dtype=False, check_stride=False) - assert_equal(symmetric_neg_middle_right, np.asarray([200, 200, 0, 0]), check_dtype=False, check_stride=False) + self.assertTrue(np.all(symmetric_neg_middle_left == np.asarray([1, 0, 0]))) + self.assertTrue(np.all(symmetric_neg_middle_right == np.asarray([200, 200, 0, 0]))) self.assertEqual(transforms.ToTensor()(symmetric_padded_img_neg).size(), (3, 28, 31)) def test_pad_raises_with_invalid_pad_sequence_len(self): @@ -510,12 +499,12 @@ def test_lambda(self): trans = transforms.Lambda(lambda x: x.add(10)) x = torch.randn(10) y = trans(x) - assert_equal(y, torch.add(x, 10)) + self.assertTrue(y.equal(torch.add(x, 10))) trans = transforms.Lambda(lambda x: x.add_(10)) x = torch.randn(10) y = trans(x) - assert_equal(y, x) + self.assertTrue(y.equal(x)) # Checking if Lambda can be printed as string trans.__repr__() @@ -624,25 +613,23 @@ def test_to_tensor(self): input_data = torch.ByteTensor(channels, height, width).random_(0, 255).float().div_(255) img = transforms.ToPILImage()(input_data) output = trans(img) - torch.testing.assert_close(output, input_data, check_stride=False) + self.assertTrue(np.allclose(input_data.numpy(), output.numpy())) ndarray = np.random.randint(low=0, high=255, size=(height, width, channels)).astype(np.uint8) output = trans(ndarray) expected_output = ndarray.transpose((2, 0, 1)) / 255.0 - torch.testing.assert_close(output, torch.as_tensor(expected_output), - check_stride=False, check_dtype=False) + 
self.assertTrue(np.allclose(output.numpy(), expected_output)) ndarray = np.random.rand(height, width, channels).astype(np.float32) output = trans(ndarray) expected_output = ndarray.transpose((2, 0, 1)) - torch.testing.assert_close(output, torch.as_tensor(expected_output), - check_stride=False, check_dtype=False) + self.assertTrue(np.allclose(output.numpy(), expected_output)) # separate test for mode '1' PIL images input_data = torch.ByteTensor(1, height, width).bernoulli_() img = transforms.ToPILImage()(input_data.mul(255)).convert('1') output = trans(img) - torch.testing.assert_close(input_data, output, check_dtype=False, check_stride=False) + self.assertTrue(np.allclose(input_data.numpy(), output.numpy())) def test_to_tensor_with_other_default_dtypes(self): current_def_dtype = torch.get_default_dtype() @@ -678,7 +665,8 @@ def test_convert_image_dtype_float_to_float(self): output_image = transform(input_image) output_image_script = transform_script(input_image, output_dtype) - torch.testing.assert_close(output_image_script, output_image, rtol=0.0, atol=1e-6) + script_diff = output_image_script - output_image + self.assertLess(script_diff.abs().max(), 1e-6) actual_min, actual_max = output_image.tolist() desired_min, desired_max = 0.0, 1.0 @@ -703,7 +691,8 @@ def test_convert_image_dtype_float_to_int(self): output_image = transform(input_image) output_image_script = transform_script(input_image, output_dtype) - torch.testing.assert_close(output_image_script, output_image, rtol=0.0, atol=1e-6) + script_diff = output_image_script - output_image + self.assertLess(script_diff.abs().max(), 1e-6) actual_min, actual_max = output_image.tolist() desired_min, desired_max = 0, torch.iinfo(output_dtype).max @@ -722,7 +711,8 @@ def test_convert_image_dtype_int_to_float(self): output_image = transform(input_image) output_image_script = transform_script(input_image, output_dtype) - torch.testing.assert_close(output_image_script, output_image, rtol=0.0, atol=1e-6) + script_diff = output_image_script - output_image + self.assertLess(script_diff.abs().max(), 1e-6) actual_min, actual_max = output_image.tolist() desired_min, desired_max = 0.0, 1.0 @@ -746,12 +736,9 @@ def test_convert_image_dtype_int_to_int(self): output_image = transform(input_image) output_image_script = transform_script(input_image, output_dtype) - torch.testing.assert_close( - output_image_script, - output_image, - rtol=0.0, - atol=1e-6, - msg="{} vs {}".format(output_image_script, output_image), + script_diff = output_image_script.float() - output_image.float() + self.assertLess( + script_diff.abs().max(), 1e-6, msg="{} vs {}".format(output_image_script, output_image) ) actual_min, actual_max = output_image.tolist() @@ -793,7 +780,8 @@ def test_accimage_to_tensor(self): expected_output = trans(Image.open(GRACE_HOPPER).convert('RGB')) output = trans(accimage.Image(GRACE_HOPPER)) - torch.testing.assert_close(output, expected_output) + self.assertEqual(expected_output.size(), output.size()) + self.assertTrue(np.allclose(output.numpy(), expected_output.numpy())) def test_pil_to_tensor(self): test_channels = [1, 3, 4] @@ -808,25 +796,25 @@ def test_pil_to_tensor(self): input_data = torch.ByteTensor(channels, height, width).random_(0, 255) img = transforms.ToPILImage()(input_data) output = trans(img) - torch.testing.assert_close(input_data, output, check_stride=False) + self.assertTrue(np.allclose(input_data.numpy(), output.numpy())) input_data = np.random.randint(low=0, high=255, size=(height, width, channels)).astype(np.uint8) img = 
transforms.ToPILImage()(input_data) output = trans(img) expected_output = input_data.transpose((2, 0, 1)) - torch.testing.assert_close(output.numpy(), expected_output) + self.assertTrue(np.allclose(output.numpy(), expected_output)) input_data = torch.as_tensor(np.random.rand(channels, height, width).astype(np.float32)) img = transforms.ToPILImage()(input_data) # CHW -> HWC and (* 255).byte() output = trans(img) # HWC -> CHW expected_output = (input_data * 255).byte() - torch.testing.assert_close(output, expected_output, check_stride=False) + self.assertTrue(np.allclose(output.numpy(), expected_output.numpy())) # separate test for mode '1' PIL images input_data = torch.ByteTensor(1, height, width).bernoulli_() img = transforms.ToPILImage()(input_data.mul(255)).convert('1') - output = trans(img).view(torch.uint8).bool().to(torch.uint8) - torch.testing.assert_close(input_data, output, check_stride=False) + output = trans(img) + self.assertTrue(np.allclose(input_data.numpy(), output.numpy())) @unittest.skipIf(accimage is None, 'accimage not available') def test_accimage_pil_to_tensor(self): @@ -836,7 +824,7 @@ def test_accimage_pil_to_tensor(self): output = trans(accimage.Image(GRACE_HOPPER)) self.assertEqual(expected_output.size(), output.size()) - torch.testing.assert_close(output, expected_output) + self.assertTrue(np.allclose(output.numpy(), expected_output.numpy())) @unittest.skipIf(accimage is None, 'accimage not available') def test_accimage_resize(self): @@ -871,7 +859,7 @@ def test_accimage_crop(self): output = trans(accimage.Image(GRACE_HOPPER)) self.assertEqual(expected_output.size(), output.size()) - torch.testing.assert_close(output, expected_output) + self.assertTrue(np.allclose(output.numpy(), expected_output.numpy())) def test_1_channel_tensor_to_pil_image(self): to_tensor = transforms.ToTensor() @@ -892,13 +880,12 @@ def test_1_channel_tensor_to_pil_image(self): for transform in [transforms.ToPILImage(), transforms.ToPILImage(mode=mode)]: img = transform(img_data) self.assertEqual(img.mode, mode) - torch.testing.assert_close(expected_output, to_tensor(img).numpy(), check_stride=False) + self.assertTrue(np.allclose(expected_output, to_tensor(img).numpy())) # 'F' mode for torch.FloatTensor img_F_mode = transforms.ToPILImage(mode='F')(img_data_float) self.assertEqual(img_F_mode.mode, 'F') - torch.testing.assert_close( - np.array(Image.fromarray(img_data_float.squeeze(0).numpy(), mode='F')), np.array(img_F_mode) - ) + self.assertTrue(np.allclose(np.array(Image.fromarray(img_data_float.squeeze(0).numpy(), mode='F')), + np.array(img_F_mode))) def test_1_channel_ndarray_to_pil_image(self): img_data_float = torch.Tensor(4, 4, 1).uniform_().numpy() @@ -912,7 +899,7 @@ def test_1_channel_ndarray_to_pil_image(self): for transform in [transforms.ToPILImage(), transforms.ToPILImage(mode=mode)]: img = transform(img_data) self.assertEqual(img.mode, mode) - torch.testing.assert_close(img_data[:, :, 0], np.asarray(img).astype(img_data.dtype)) + self.assertTrue(np.allclose(img_data[:, :, 0], img)) def test_2_channel_ndarray_to_pil_image(self): def verify_img_data(img_data, mode): @@ -924,7 +911,7 @@ def verify_img_data(img_data, mode): self.assertEqual(img.mode, mode) split = img.split() for i in range(2): - torch.testing.assert_close(img_data[:, :, i], np.asarray(split[i]), check_stride=False) + self.assertTrue(np.allclose(img_data[:, :, i], split[i])) img_data = torch.ByteTensor(4, 4, 2).random_(0, 255).numpy() for mode in [None, 'LA']: @@ -997,7 +984,7 @@ def verify_img_data(img_data, mode): 
self.assertEqual(img.mode, mode) split = img.split() for i in range(3): - torch.testing.assert_close(img_data[:, :, i], np.asarray(split[i]), check_stride=False) + self.assertTrue(np.allclose(img_data[:, :, i], split[i])) img_data = torch.ByteTensor(4, 4, 3).random_(0, 255).numpy() for mode in [None, 'RGB', 'HSV', 'YCbCr']: @@ -1046,7 +1033,7 @@ def verify_img_data(img_data, mode): self.assertEqual(img.mode, mode) split = img.split() for i in range(4): - torch.testing.assert_close(img_data[:, :, i], np.asarray(split[i]), check_stride=False) + self.assertTrue(np.allclose(img_data[:, :, i], split[i])) img_data = torch.ByteTensor(4, 4, 4).random_(0, 255).numpy() for mode in [None, 'RGBA', 'CMYK', 'RGBX']: @@ -1077,7 +1064,7 @@ def test_2d_tensor_to_pil_image(self): for transform in [transforms.ToPILImage(), transforms.ToPILImage(mode=mode)]: img = transform(img_data) self.assertEqual(img.mode, mode) - np.testing.assert_allclose(expected_output, to_tensor(img).numpy()[0]) + self.assertTrue(np.allclose(expected_output, to_tensor(img).numpy())) def test_2d_ndarray_to_pil_image(self): img_data_float = torch.Tensor(4, 4).uniform_().numpy() @@ -1091,7 +1078,7 @@ def test_2d_ndarray_to_pil_image(self): for transform in [transforms.ToPILImage(), transforms.ToPILImage(mode=mode)]: img = transform(img_data) self.assertEqual(img.mode, mode) - np.testing.assert_allclose(img_data, img) + self.assertTrue(np.allclose(img_data, img)) def test_tensor_bad_types_to_pil_image(self): with self.assertRaisesRegex(ValueError, r'pic should be 2/3 dimensional. Got \d+ dimensions.'): @@ -1202,7 +1189,7 @@ def samples_from_standard_normal(tensor): # Checking the optional in-place behaviour tensor = torch.rand((1, 16, 16)) tensor_inplace = transforms.Normalize((0.5,), (0.5,), inplace=True)(tensor) - assert_equal(tensor, tensor_inplace) + self.assertTrue(torch.equal(tensor, tensor_inplace)) def test_normalize_different_dtype(self): for dtype1 in [torch.float32, torch.float64]: @@ -1228,8 +1215,8 @@ def test_normalize_3d_tensor(self): result2 = F.normalize(img, mean_unsqueezed.repeat(1, img_size, img_size), std_unsqueezed.repeat(1, img_size, img_size)) - torch.testing.assert_close(target, result1.numpy()) - torch.testing.assert_close(target, result2.numpy()) + assert_array_almost_equal(target, result1.numpy()) + assert_array_almost_equal(target, result2.numpy()) def test_adjust_brightness(self): x_shape = [2, 2, 3] @@ -1240,21 +1227,21 @@ def test_adjust_brightness(self): # test 0 y_pil = F.adjust_brightness(x_pil, 1) y_np = np.array(y_pil) - torch.testing.assert_close(y_np, x_np) + self.assertTrue(np.allclose(y_np, x_np)) # test 1 y_pil = F.adjust_brightness(x_pil, 0.5) y_np = np.array(y_pil) y_ans = [0, 2, 6, 27, 67, 113, 18, 4, 117, 45, 127, 0] y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape) - torch.testing.assert_close(y_np, y_ans) + self.assertTrue(np.allclose(y_np, y_ans)) # test 2 y_pil = F.adjust_brightness(x_pil, 2) y_np = np.array(y_pil) y_ans = [0, 10, 26, 108, 255, 255, 74, 16, 255, 180, 255, 2] y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape) - torch.testing.assert_close(y_np, y_ans) + self.assertTrue(np.allclose(y_np, y_ans)) def test_adjust_contrast(self): x_shape = [2, 2, 3] @@ -1265,21 +1252,21 @@ def test_adjust_contrast(self): # test 0 y_pil = F.adjust_contrast(x_pil, 1) y_np = np.array(y_pil) - torch.testing.assert_close(y_np, x_np) + self.assertTrue(np.allclose(y_np, x_np)) # test 1 y_pil = F.adjust_contrast(x_pil, 0.5) y_np = np.array(y_pil) y_ans = [43, 45, 49, 70, 110, 156, 61, 47, 
160, 88, 170, 43] y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape) - torch.testing.assert_close(y_np, y_ans) + self.assertTrue(np.allclose(y_np, y_ans)) # test 2 y_pil = F.adjust_contrast(x_pil, 2) y_np = np.array(y_pil) y_ans = [0, 0, 0, 22, 184, 255, 0, 0, 255, 94, 255, 0] y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape) - torch.testing.assert_close(y_np, y_ans) + self.assertTrue(np.allclose(y_np, y_ans)) @unittest.skipIf(Image.__version__ >= '7', "Temporarily disabled") def test_adjust_saturation(self): @@ -1291,21 +1278,21 @@ def test_adjust_saturation(self): # test 0 y_pil = F.adjust_saturation(x_pil, 1) y_np = np.array(y_pil) - torch.testing.assert_close(y_np, x_np) + self.assertTrue(np.allclose(y_np, x_np)) # test 1 y_pil = F.adjust_saturation(x_pil, 0.5) y_np = np.array(y_pil) y_ans = [2, 4, 8, 87, 128, 173, 39, 25, 138, 133, 215, 88] y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape) - torch.testing.assert_close(y_np, y_ans) + self.assertTrue(np.allclose(y_np, y_ans)) # test 2 y_pil = F.adjust_saturation(x_pil, 2) y_np = np.array(y_pil) y_ans = [0, 6, 22, 0, 149, 255, 32, 0, 255, 4, 255, 0] y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape) - torch.testing.assert_close(y_np, y_ans) + self.assertTrue(np.allclose(y_np, y_ans)) def test_adjust_hue(self): x_shape = [2, 2, 3] @@ -1323,21 +1310,21 @@ def test_adjust_hue(self): y_np = np.array(y_pil) y_ans = [0, 5, 13, 54, 139, 226, 35, 8, 234, 91, 255, 1] y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape) - torch.testing.assert_close(y_np, y_ans) + self.assertTrue(np.allclose(y_np, y_ans)) # test 1 y_pil = F.adjust_hue(x_pil, 0.25) y_np = np.array(y_pil) y_ans = [13, 0, 12, 224, 54, 226, 234, 8, 99, 1, 222, 255] y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape) - torch.testing.assert_close(y_np, y_ans) + self.assertTrue(np.allclose(y_np, y_ans)) # test 2 y_pil = F.adjust_hue(x_pil, -0.25) y_np = np.array(y_pil) y_ans = [0, 13, 2, 54, 226, 58, 8, 234, 152, 255, 43, 1] y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape) - torch.testing.assert_close(y_np, y_ans) + self.assertTrue(np.allclose(y_np, y_ans)) def test_adjust_sharpness(self): x_shape = [4, 4, 3] @@ -1350,7 +1337,7 @@ def test_adjust_sharpness(self): # test 0 y_pil = F.adjust_sharpness(x_pil, 1) y_np = np.array(y_pil) - torch.testing.assert_close(y_np, x_np) + self.assertTrue(np.allclose(y_np, x_np)) # test 1 y_pil = F.adjust_sharpness(x_pil, 0.5) @@ -1359,7 +1346,7 @@ def test_adjust_sharpness(self): 30, 74, 103, 96, 114, 97, 110, 100, 101, 114, 32, 81, 103, 108, 102, 101, 107, 116, 105, 115, 0, 0, 73, 32, 108, 111, 118, 101, 32, 121, 111, 117] y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape) - torch.testing.assert_close(y_np, y_ans) + self.assertTrue(np.allclose(y_np, y_ans)) # test 2 y_pil = F.adjust_sharpness(x_pil, 2) @@ -1368,7 +1355,7 @@ def test_adjust_sharpness(self): 0, 46, 118, 111, 132, 97, 110, 100, 101, 114, 32, 95, 135, 146, 126, 112, 119, 116, 105, 115, 0, 0, 73, 32, 108, 111, 118, 101, 32, 121, 111, 117] y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape) - torch.testing.assert_close(y_np, y_ans) + self.assertTrue(np.allclose(y_np, y_ans)) # test 3 x_shape = [2, 2, 3] @@ -1379,7 +1366,7 @@ def test_adjust_sharpness(self): y_pil = F.adjust_sharpness(x_pil, 2) y_np = np.array(y_pil).transpose(2, 0, 1) y_th = F.adjust_sharpness(x_th, 2) - torch.testing.assert_close(y_np, y_th.numpy()) + self.assertTrue(np.allclose(y_np, y_th.numpy())) def test_adjust_gamma(self): x_shape = [2, 2, 3] @@ -1390,21 +1377,21 @@ 
def test_adjust_gamma(self): # test 0 y_pil = F.adjust_gamma(x_pil, 1) y_np = np.array(y_pil) - torch.testing.assert_close(y_np, x_np) + self.assertTrue(np.allclose(y_np, x_np)) # test 1 y_pil = F.adjust_gamma(x_pil, 0.5) y_np = np.array(y_pil) y_ans = [0, 35, 57, 117, 186, 241, 97, 45, 245, 152, 255, 16] y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape) - torch.testing.assert_close(y_np, y_ans) + self.assertTrue(np.allclose(y_np, y_ans)) # test 2 y_pil = F.adjust_gamma(x_pil, 2) y_np = np.array(y_pil) y_ans = [0, 0, 0, 11, 71, 201, 5, 0, 215, 31, 255, 0] y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape) - torch.testing.assert_close(y_np, y_ans) + self.assertTrue(np.allclose(y_np, y_ans)) def test_adjusts_L_mode(self): x_shape = [2, 2, 3] @@ -1463,10 +1450,10 @@ def test_linear_transformation(self): cov += np.dot(xwhite, xwhite.T) / num_features mean += np.sum(xwhite) / num_features # if rtol for std = 1e-3 then rtol for cov = 2e-3 as std**2 = cov - torch.testing.assert_close(cov / num_samples, np.identity(1), rtol=2e-3, atol=1e-8, check_dtype=False, - msg="cov not close to 1") - torch.testing.assert_close(mean / num_samples, 0, rtol=1e-3, atol=1e-8, check_dtype=False, - msg="mean not close to 1") + self.assertTrue(np.allclose(cov / num_samples, np.identity(1), rtol=2e-3), + "cov not close to 1") + self.assertTrue(np.allclose(mean / num_samples, 0, rtol=1e-3), + "mean not close to 0") # Checking if LinearTransformation can be printed as string whitening.__repr__() @@ -1504,7 +1491,7 @@ def test_rotate(self): result_a = F.rotate(img, 90) result_b = F.rotate(img, -270) - assert_equal(np.array(result_a), np.array(result_b)) + self.assertTrue(np.all(np.array(result_a) == np.array(result_b))) def test_rotate_fill(self): img = F.to_pil_image(np.ones((100, 100, 3), dtype=np.uint8) * 255, "RGB") @@ -1745,7 +1732,7 @@ def test_to_grayscale(self): gray_np_1 = np.array(gray_pil_1) self.assertEqual(gray_pil_1.mode, 'L', 'mode should be L') self.assertEqual(gray_np_1.shape, tuple(x_shape[0:2]), 'should be 1 channel') - assert_equal(gray_np, gray_np_1) + np.testing.assert_equal(gray_np, gray_np_1) # Case 2: RGB -> 3 channel grayscale trans2 = transforms.Grayscale(num_output_channels=3) @@ -1753,9 +1740,9 @@ def test_to_grayscale(self): gray_np_2 = np.array(gray_pil_2) self.assertEqual(gray_pil_2.mode, 'RGB', 'mode should be RGB') self.assertEqual(gray_np_2.shape, tuple(x_shape), 'should be 3 channel') - assert_equal(gray_np_2[:, :, 0], gray_np_2[:, :, 1]) - assert_equal(gray_np_2[:, :, 1], gray_np_2[:, :, 2]) - assert_equal(gray_np, gray_np_2[:, :, 0], check_stride=False) + np.testing.assert_equal(gray_np_2[:, :, 0], gray_np_2[:, :, 1]) + np.testing.assert_equal(gray_np_2[:, :, 1], gray_np_2[:, :, 2]) + np.testing.assert_equal(gray_np, gray_np_2[:, :, 0]) # Case 3: 1 channel grayscale -> 1 channel grayscale trans3 = transforms.Grayscale(num_output_channels=1) @@ -1763,7 +1750,7 @@ def test_to_grayscale(self): gray_np_3 = np.array(gray_pil_3) self.assertEqual(gray_pil_3.mode, 'L', 'mode should be L') self.assertEqual(gray_np_3.shape, tuple(x_shape[0:2]), 'should be 1 channel') - assert_equal(gray_np, gray_np_3) + np.testing.assert_equal(gray_np, gray_np_3) # Case 4: 1 channel grayscale -> 3 channel grayscale trans4 = transforms.Grayscale(num_output_channels=3) @@ -1771,9 +1758,9 @@ def test_to_grayscale(self): gray_np_4 = np.array(gray_pil_4) self.assertEqual(gray_pil_4.mode, 'RGB', 'mode should be RGB') self.assertEqual(gray_np_4.shape, tuple(x_shape), 'should be 3 channel') - 
@@ -1732,7 +1745,7 @@ def test_to_grayscale(self):
         gray_np_1 = np.array(gray_pil_1)
         self.assertEqual(gray_pil_1.mode, 'L', 'mode should be L')
         self.assertEqual(gray_np_1.shape, tuple(x_shape[0:2]), 'should be 1 channel')
-        np.testing.assert_equal(gray_np, gray_np_1)
+        assert_equal(gray_np, gray_np_1)
 
         # Case 2: RGB -> 3 channel grayscale
         trans2 = transforms.Grayscale(num_output_channels=3)
@@ -1740,9 +1753,9 @@ def test_to_grayscale(self):
         gray_np_2 = np.array(gray_pil_2)
         self.assertEqual(gray_pil_2.mode, 'RGB', 'mode should be RGB')
         self.assertEqual(gray_np_2.shape, tuple(x_shape), 'should be 3 channel')
-        np.testing.assert_equal(gray_np_2[:, :, 0], gray_np_2[:, :, 1])
-        np.testing.assert_equal(gray_np_2[:, :, 1], gray_np_2[:, :, 2])
-        np.testing.assert_equal(gray_np, gray_np_2[:, :, 0])
+        assert_equal(gray_np_2[:, :, 0], gray_np_2[:, :, 1])
+        assert_equal(gray_np_2[:, :, 1], gray_np_2[:, :, 2])
+        assert_equal(gray_np, gray_np_2[:, :, 0], check_stride=False)
 
         # Case 3: 1 channel grayscale -> 1 channel grayscale
         trans3 = transforms.Grayscale(num_output_channels=1)
@@ -1750,7 +1763,7 @@ def test_to_grayscale(self):
         gray_np_3 = np.array(gray_pil_3)
         self.assertEqual(gray_pil_3.mode, 'L', 'mode should be L')
         self.assertEqual(gray_np_3.shape, tuple(x_shape[0:2]), 'should be 1 channel')
-        np.testing.assert_equal(gray_np, gray_np_3)
+        assert_equal(gray_np, gray_np_3)
 
         # Case 4: 1 channel grayscale -> 3 channel grayscale
         trans4 = transforms.Grayscale(num_output_channels=3)
@@ -1758,9 +1771,9 @@ def test_to_grayscale(self):
         gray_np_4 = np.array(gray_pil_4)
         self.assertEqual(gray_pil_4.mode, 'RGB', 'mode should be RGB')
         self.assertEqual(gray_np_4.shape, tuple(x_shape), 'should be 3 channel')
-        np.testing.assert_equal(gray_np_4[:, :, 0], gray_np_4[:, :, 1])
-        np.testing.assert_equal(gray_np_4[:, :, 1], gray_np_4[:, :, 2])
-        np.testing.assert_equal(gray_np, gray_np_4[:, :, 0])
+        assert_equal(gray_np_4[:, :, 0], gray_np_4[:, :, 1])
+        assert_equal(gray_np_4[:, :, 1], gray_np_4[:, :, 2])
+        assert_equal(gray_np, gray_np_4[:, :, 0], check_stride=False)
 
         # Checking if Grayscale can be printed as string
         trans4.__repr__()
@@ -1827,9 +1840,9 @@ def test_random_grayscale(self):
         gray_np_2 = np.array(gray_pil_2)
         self.assertEqual(gray_pil_2.mode, 'RGB', 'mode should be RGB')
         self.assertEqual(gray_np_2.shape, tuple(x_shape), 'should be 3 channel')
-        np.testing.assert_equal(gray_np_2[:, :, 0], gray_np_2[:, :, 1])
-        np.testing.assert_equal(gray_np_2[:, :, 1], gray_np_2[:, :, 2])
-        np.testing.assert_equal(gray_np, gray_np_2[:, :, 0])
+        assert_equal(gray_np_2[:, :, 0], gray_np_2[:, :, 1])
+        assert_equal(gray_np_2[:, :, 1], gray_np_2[:, :, 2])
+        assert_equal(gray_np, gray_np_2[:, :, 0], check_stride=False)
 
         # Case 3b: RGB -> 3 channel grayscale (unchanged)
         trans2 = transforms.RandomGrayscale(p=0.0)
@@ -1837,7 +1850,7 @@ def test_random_grayscale(self):
         gray_np_2 = np.array(gray_pil_2)
         self.assertEqual(gray_pil_2.mode, 'RGB', 'mode should be RGB')
         self.assertEqual(gray_np_2.shape, tuple(x_shape), 'should be 3 channel')
-        np.testing.assert_equal(x_np, gray_np_2)
+        assert_equal(x_np, gray_np_2)
 
         # Case 3c: 1 channel grayscale -> 1 channel grayscale (grayscaled)
         trans3 = transforms.RandomGrayscale(p=1.0)
@@ -1845,7 +1858,7 @@ def test_random_grayscale(self):
         gray_np_3 = np.array(gray_pil_3)
         self.assertEqual(gray_pil_3.mode, 'L', 'mode should be L')
         self.assertEqual(gray_np_3.shape, tuple(x_shape[0:2]), 'should be 1 channel')
-        np.testing.assert_equal(gray_np, gray_np_3)
+        assert_equal(gray_np, gray_np_3)
 
         # Case 3d: 1 channel grayscale -> 1 channel grayscale (unchanged)
         trans3 = transforms.RandomGrayscale(p=0.0)
@@ -1853,7 +1866,7 @@ def test_random_grayscale(self):
         gray_np_3 = np.array(gray_pil_3)
         self.assertEqual(gray_pil_3.mode, 'L', 'mode should be L')
         self.assertEqual(gray_np_3.shape, tuple(x_shape[0:2]), 'should be 1 channel')
-        np.testing.assert_equal(gray_np, gray_np_3)
+        assert_equal(gray_np, gray_np_3)
 
         # Checking if RandomGrayscale can be printed as string
         trans3.__repr__()
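The exact-match helper used throughout the grayscale tests above behaves like `np.testing.assert_equal` but additionally verifies dtype and, depending on the torch release, the strides of the compared inputs, which is why the channel-slice comparisons pass `check_stride=False`. A small sketch of the same situation with invented arrays:

    import numpy as np
    import torch.testing

    gray = np.zeros((4, 4), dtype=np.uint8)
    rgb = np.zeros((4, 4, 3), dtype=np.uint8)

    # rgb[:, :, 0] keeps the parent's strides, so the exact comparison must
    # skip the stride check while still matching values, shape and dtype:
    torch.testing.assert_close(gray, rgb[:, :, 0], rtol=0, atol=0, check_stride=False)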
diff --git a/test/test_transforms_tensor.py b/test/test_transforms_tensor.py
index 2c598e90833..0d5e365351d 100644
--- a/test/test_transforms_tensor.py
+++ b/test/test_transforms_tensor.py
@@ -10,6 +10,7 @@
 from typing import Sequence
 
 from common_utils import TransformsTester, get_tmp_dir, int_dtypes, float_dtypes
+from _assert_utils import assert_equal
 
 NEAREST, BILINEAR, BICUBIC = InterpolationMode.NEAREST, InterpolationMode.BILINEAR, InterpolationMode.BICUBIC
 
@@ -38,7 +39,7 @@ def _test_transform_vs_scripted(self, transform, s_transform, tensor, msg=None):
         out1 = transform(tensor)
         torch.manual_seed(12)
         out2 = s_transform(tensor)
-        self.assertTrue(out1.equal(out2), msg=msg)
+        assert_equal(out1, out2, msg=msg)
 
     def _test_transform_vs_scripted_on_batch(self, transform, s_transform, batch_tensors, msg=None):
         torch.manual_seed(12)
@@ -48,11 +49,11 @@ def _test_transform_vs_scripted_on_batch(self, transform, s_transform, batch_ten
             img_tensor = batch_tensors[i, ...]
             torch.manual_seed(12)
             transformed_img = transform(img_tensor)
-            self.assertTrue(transformed_img.equal(transformed_batch[i, ...]), msg=msg)
+            assert_equal(transformed_img, transformed_batch[i, ...], msg=msg)
 
         torch.manual_seed(12)
         s_transformed_batch = s_transform(batch_tensors)
-        self.assertTrue(transformed_batch.equal(s_transformed_batch), msg=msg)
+        assert_equal(transformed_batch, s_transformed_batch, msg=msg)
 
     def _test_class_op(self, method, meth_kwargs=None, test_exact_match=True, **match_kwargs):
         if meth_kwargs is None:
@@ -75,7 +76,7 @@ def _test_class_op(self, method, meth_kwargs=None, test_exact_match=True, **matc
         torch.manual_seed(12)
         transformed_tensor_script = scripted_fn(tensor)
-        self.assertTrue(transformed_tensor.equal(transformed_tensor_script))
+        assert_equal(transformed_tensor, transformed_tensor_script)
 
         batch_tensors = self._create_data_batch(height=23, width=34, channels=3, num_samples=4, device=self.device)
         self._test_transform_vs_scripted_on_batch(f, scripted_fn, batch_tensors)
@@ -270,8 +271,11 @@ def _test_op_list_output(self, func, method, out_length, fn_kwargs=None, meth_kw
         self.assertEqual(len(transformed_t_list), len(transformed_t_list_script))
         self.assertEqual(len(transformed_t_list_script), out_length)
         for transformed_tensor, transformed_tensor_script in zip(transformed_t_list, transformed_t_list_script):
-            self.assertTrue(transformed_tensor.equal(transformed_tensor_script),
-                            msg="{} vs {}".format(transformed_tensor, transformed_tensor_script))
+            assert_equal(
+                transformed_tensor,
+                transformed_tensor_script,
+                msg="{} vs {}".format(transformed_tensor, transformed_tensor_script),
+            )
 
         # test for class interface
         fn = getattr(T, method)(**meth_kwargs)
@@ -289,8 +293,11 @@ def _test_op_list_output(self, func, method, out_length, fn_kwargs=None, meth_kw
             torch.manual_seed(12)
             transformed_img_list = fn(img_tensor)
             for transformed_img, transformed_batch in zip(transformed_img_list, transformed_batch_list):
-                self.assertTrue(transformed_img.equal(transformed_batch[i, ...]),
-                                msg="{} vs {}".format(transformed_img, transformed_batch[i, ...]))
+                assert_equal(
+                    transformed_img,
+                    transformed_batch[i, ...],
+                    msg="{} vs {}".format(transformed_img, transformed_batch[i, ...]),
+                )
 
         with get_tmp_dir() as tmp_dir:
             scripted_fn.save(os.path.join(tmp_dir, "t_op_list_{}.pt".format(method)))
@@ -505,7 +512,7 @@ def test_linear_transformation(self):
         transformed_batch = fn(batch_tensors)
         torch.manual_seed(12)
         s_transformed_batch = scripted_fn(batch_tensors)
-        self.assertTrue(transformed_batch.equal(s_transformed_batch))
+        assert_equal(transformed_batch, s_transformed_batch)
 
         with get_tmp_dir() as tmp_dir:
             scripted_fn.save(os.path.join(tmp_dir, "t_norm.pt"))
@@ -525,7 +532,7 @@ def test_compose(self):
             transformed_tensor = transforms(tensor)
             torch.manual_seed(12)
             transformed_tensor_script = scripted_fn(tensor)
-            self.assertTrue(transformed_tensor.equal(transformed_tensor_script), msg="{}".format(transforms))
+            assert_equal(transformed_tensor, transformed_tensor_script, msg="{}".format(transforms))
 
         t = T.Compose([
             lambda x: x,
 
@@ -551,7 +558,7 @@ def test_random_apply(self):
             transformed_tensor = transforms(tensor)
             torch.manual_seed(12)
             transformed_tensor_script = scripted_fn(tensor)
-            self.assertTrue(transformed_tensor.equal(transformed_tensor_script), msg="{}".format(transforms))
+            assert_equal(transformed_tensor, transformed_tensor_script, msg="{}".format(transforms))
 
         if torch.device(self.device).type == "cpu":
             # Can't check this twice, otherwise
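Replacing the raw `.equal()` assertions above pays off mainly on failure: `assertTrue(t1.equal(t2))` reports nothing but "False is not true", whereas the exact comparison raises with a summary of how many elements mismatch and by how much (the exact wording varies across torch releases). Invented tensors for illustration:

    import torch
    import torch.testing

    t1 = torch.zeros(3)
    t2 = torch.tensor([0.0, 1.0, 0.0])

    assert not t1.equal(t2)  # a bare boolean, useless in a test log

    # Raises AssertionError with a per-element mismatch summary:
    # torch.testing.assert_close(t1, t2, rtol=0, atol=0)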
diff --git a/test/test_transforms_video.py b/test/test_transforms_video.py
index e0c7ab5260b..942bb010f71 100644
--- a/test/test_transforms_video.py
+++ b/test/test_transforms_video.py
@@ -4,6 +4,7 @@
 import random
 import numpy as np
 import warnings
+from _assert_utils import assert_equal
 
 try:
     from scipy import stats
@@ -120,7 +121,7 @@ def samples_from_standard_normal(tensor):
         # Checking the optional in-place behaviour
         tensor = torch.rand((3, 128, 16, 16))
         tensor_inplace = transforms.NormalizeVideo((0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True)(tensor)
-        self.assertTrue(torch.equal(tensor, tensor_inplace))
+        assert_equal(tensor, tensor_inplace)
 
         transforms.NormalizeVideo((0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True).__repr__()
 
diff --git a/test/test_video_reader.py b/test/test_video_reader.py
index 5b9b2184daf..d9326138397 100644
--- a/test/test_video_reader.py
+++ b/test/test_video_reader.py
@@ -11,6 +11,7 @@
 from numpy.random import randint
 from torchvision.io import _HAS_VIDEO_OPT
 from common_utils import PY39_SKIP
+from _assert_utils import assert_equal
 
 
 try:
@@ -359,8 +360,7 @@ def compare_decoding_result(self, tv_result, ref_result, config=all_check_config
         )
         self.assertAlmostEqual(mean_delta, 0, delta=1.0)
 
-        is_same = torch.all(torch.eq(vtimebase, ref_result.vtimebase)).item()
-        self.assertEqual(is_same, True)
+        assert_equal(vtimebase, ref_result.vtimebase)
 
         if (
             config.check_aframes
@@ -369,8 +369,7 @@ def compare_decoding_result(self, tv_result, ref_result, config=all_check_config
         ):
             """Audio stream is available and audio frame is required to return
             from decoder"""
-            is_same = torch.all(torch.eq(aframes, ref_result.aframes)).item()
-            self.assertEqual(is_same, True)
+            assert_equal(aframes, ref_result.aframes)
 
         if (
             config.check_aframe_pts
@@ -378,11 +377,9 @@ def compare_decoding_result(self, tv_result, ref_result, config=all_check_config
             and ref_result.aframe_pts.numel() > 0
         ):
             """Audio stream is available"""
-            is_same = torch.all(torch.eq(aframe_pts, ref_result.aframe_pts)).item()
-            self.assertEqual(is_same, True)
+            assert_equal(aframe_pts, ref_result.aframe_pts)
 
-            is_same = torch.all(torch.eq(atimebase, ref_result.atimebase)).item()
-            self.assertEqual(is_same, True)
+            assert_equal(atimebase, ref_result.atimebase)
 
 @unittest.skip(
     "This stress test will iteratively decode the same set of videos."