@@ -67,6 +67,7 @@ def test_scale_channel():
 
 
 class TestRotate:
+
     ALL_DTYPES = [None, torch.float32, torch.float64, torch.float16]
     scripted_rotate = torch.jit.script(F.rotate)
     IMG_W = 26
@@ -152,6 +153,7 @@ def test_rotate_interpolation_type(self):
 
 
 class TestAffine:
+
     ALL_DTYPES = [None, torch.float32, torch.float64, torch.float16]
     scripted_affine = torch.jit.script(F.affine)
 
@@ -405,6 +407,7 @@ def _get_data_dims_and_points_for_perspective():
 )
 @pytest.mark.parametrize("fn", [F.perspective, torch.jit.script(F.perspective)])
 def test_perspective_pil_vs_tensor(device, dims_and_points, dt, fill, fn):
+
     if dt == torch.float16 and device == "cpu":
         # skip float16 on CPU case
         return
@@ -436,6 +439,7 @@ def test_perspective_pil_vs_tensor(device, dims_and_points, dt, fill, fn):
 @pytest.mark.parametrize("dims_and_points", _get_data_dims_and_points_for_perspective())
 @pytest.mark.parametrize("dt", [None, torch.float32, torch.float64, torch.float16])
 def test_perspective_batch(device, dims_and_points, dt):
+
     if dt == torch.float16 and device == "cpu":
         # skip float16 on CPU case
         return
@@ -487,6 +491,7 @@ def test_perspective_interpolation_type():
 @pytest.mark.parametrize("max_size", [None, 34, 40, 1000])
 @pytest.mark.parametrize("interpolation", [BILINEAR, BICUBIC, NEAREST, NEAREST_EXACT])
 def test_resize(device, dt, size, max_size, interpolation):
+
     if dt == torch.float16 and device == "cpu":
         # skip float16 on CPU case
         return
@@ -536,6 +541,7 @@ def test_resize(device, dt, size, max_size, interpolation):
 
 @pytest.mark.parametrize("device", cpu_and_gpu())
 def test_resize_asserts(device):
+
     tensor, pil_img = _create_data(26, 36, device=device)
 
     res1 = F.resize(tensor, size=32, interpolation=PIL.Image.BILINEAR)
@@ -555,6 +561,7 @@ def test_resize_asserts(device):
 @pytest.mark.parametrize("size", [[96, 72], [96, 420], [420, 72]])
 @pytest.mark.parametrize("interpolation", [BILINEAR, BICUBIC])
 def test_resize_antialias(device, dt, size, interpolation):
+
     if dt == torch.float16 and device == "cpu":
         # skip float16 on CPU case
         return
@@ -603,6 +610,7 @@ def test_resize_antialias(device, dt, size, interpolation):
 
 
 def test_resize_antialias_default_warning():
+
     img = torch.randint(0, 256, size=(3, 44, 56), dtype=torch.uint8)
 
     match = "The default value of the antialias"
@@ -621,6 +629,7 @@ def test_resize_antialias_default_warning():
 def check_functional_vs_PIL_vs_scripted(
     fn, fn_pil, fn_t, config, device, dtype, channels=3, tol=2.0 + 1e-10, agg_method="max"
 ):
+
     script_fn = torch.jit.script(fn)
     torch.manual_seed(15)
     tensor, pil_img = _create_data(26, 34, channels=channels, device=device)
@@ -1057,6 +1066,7 @@ def test_crop(device, top, left, height, width):
 @pytest.mark.parametrize("sigma", [[0.5, 0.5], (0.5, 0.5), (0.8, 0.8), (1.7, 1.7)])
 @pytest.mark.parametrize("fn", [F.gaussian_blur, torch.jit.script(F.gaussian_blur)])
 def test_gaussian_blur(device, image_size, dt, ksize, sigma, fn):
+
     # true_cv2_results = {
     #     # np_img = np.arange(3 * 10 * 12, dtype="uint8").reshape((10, 12, 3))
     #     # cv2.GaussianBlur(np_img, ksize=(3, 3), sigmaX=0.8)