Skip to content

Commit d52eccf

Browse files
NicolasHug and facebook-github-bot
authored and committed
[fbsync] Added CUDA support for interpolation AA bilinear/bicubic (#3842)
Summary:
* Added CUDA support for interpolation AA bilinear/bicubic
* Fixed code formatting error
* Fixed CUDA tests on CPU-only builds
* Fixed CUDA internal torch check for bicubic mode

Reviewed By: cpuhrsch
Differential Revision: D28538753
fbshipit-source-id: 8b6c16db07a05fdba4b35cb8f5ec077b50691fb7
1 parent 9f85834 commit d52eccf

File tree

2 files changed

+420
-2
lines changed

2 files changed

+420
-2
lines changed

test/test_functional_tensor.py

Lines changed: 17 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,7 @@
1414
import torchvision.transforms as T
1515
from torchvision.transforms import InterpolationMode
1616

17-
from common_utils import TransformsTester, cpu_and_gpu
17+
from common_utils import TransformsTester, cpu_and_gpu, needs_cuda
1818

1919
from typing import Dict, List, Sequence, Tuple
2020

@@ -868,12 +868,14 @@ def test_perspective_interpolation_warning(tester):
868868
tester.assertTrue(res1.equal(res2))
869869

870870

871-
@pytest.mark.parametrize('device', ["cpu", ])
871+
@pytest.mark.parametrize('device', cpu_and_gpu())
872872
@pytest.mark.parametrize('dt', [None, torch.float32, torch.float64, torch.float16])
873873
@pytest.mark.parametrize('size', [[96, 72], [96, 420], [420, 72]])
874874
@pytest.mark.parametrize('interpolation', [BILINEAR, BICUBIC])
875875
def test_resize_antialias(device, dt, size, interpolation, tester):
876876

877+
torch.manual_seed(12)
878+
877879
if dt == torch.float16 and device == "cpu":
878880
# skip float16 on CPU case
879881
return
@@ -924,6 +926,19 @@ def test_resize_antialias(device, dt, size, interpolation, tester):
924926
tester.assertTrue(resized_tensor.equal(resize_result), msg=f"{size}, {interpolation}, {dt}")
925927

926928

929+
@needs_cuda
930+
@pytest.mark.parametrize('interpolation', [BILINEAR, BICUBIC])
931+
def test_assert_resize_antialias(interpolation, tester):
932+
933+
# Checks implementation on very large scales
934+
# and catch TORCH_CHECK inside interpolate_aa_kernels.cu
935+
torch.manual_seed(12)
936+
tensor, pil_img = tester._create_data(1000, 1000, device="cuda")
937+
938+
with pytest.raises(RuntimeError, match=r"Max supported scale factor is"):
939+
F.resize(tensor, size=(5, 5), interpolation=interpolation, antialias=True)
940+
941+
927942
def check_functional_vs_PIL_vs_scripted(fn, fn_pil, fn_t, config, device, dtype, tol=2.0 + 1e-10, agg_method="max"):
928943

929944
tester = Tester()

0 commit comments

Comments (0)