@@ -29,7 +29,7 @@
 from torch.utils._pytree import tree_flatten, tree_unflatten
 from torchvision import datapoints
 from torchvision.ops.boxes import box_iou
-from torchvision.transforms.functional import InterpolationMode, pil_to_tensor, to_pil_image
+from torchvision.transforms.functional import InterpolationMode, to_pil_image
 from torchvision.transforms.v2 import functional as F
 from torchvision.transforms.v2.utils import check_type, is_simple_tensor, query_chw
 
@@ -406,112 +406,6 @@ def was_applied(output, inpt):
     assert transform.was_applied(output, input)
 
 
-@pytest.mark.parametrize("p", [0.0, 1.0])
-class TestRandomHorizontalFlip:
-    def input_expected_image_tensor(self, p, dtype=torch.float32):
-        input = torch.tensor([[[0, 1], [0, 1]], [[1, 0], [1, 0]]], dtype=dtype)
-        expected = torch.tensor([[[1, 0], [1, 0]], [[0, 1], [0, 1]]], dtype=dtype)
-
-        return input, expected if p == 1 else input
-
-    def test_simple_tensor(self, p):
-        input, expected = self.input_expected_image_tensor(p)
-        transform = transforms.RandomHorizontalFlip(p=p)
-
-        actual = transform(input)
-
-        assert_equal(expected, actual)
-
-    def test_pil_image(self, p):
-        input, expected = self.input_expected_image_tensor(p, dtype=torch.uint8)
-        transform = transforms.RandomHorizontalFlip(p=p)
-
-        actual = transform(to_pil_image(input))
-
-        assert_equal(expected, pil_to_tensor(actual))
-
-    def test_datapoints_image(self, p):
-        input, expected = self.input_expected_image_tensor(p)
-        transform = transforms.RandomHorizontalFlip(p=p)
-
-        actual = transform(datapoints.Image(input))
-
-        assert_equal(datapoints.Image(expected), actual)
-
-    def test_datapoints_mask(self, p):
-        input, expected = self.input_expected_image_tensor(p)
-        transform = transforms.RandomHorizontalFlip(p=p)
-
-        actual = transform(datapoints.Mask(input))
-
-        assert_equal(datapoints.Mask(expected), actual)
-
-    def test_datapoints_bounding_box(self, p):
-        input = datapoints.BoundingBox([0, 0, 5, 5], format=datapoints.BoundingBoxFormat.XYXY, spatial_size=(10, 10))
-        transform = transforms.RandomHorizontalFlip(p=p)
-
-        actual = transform(input)
-
-        expected_image_tensor = torch.tensor([5, 0, 10, 5]) if p == 1.0 else input
-        expected = datapoints.BoundingBox.wrap_like(input, expected_image_tensor)
-        assert_equal(expected, actual)
-        assert actual.format == expected.format
-        assert actual.spatial_size == expected.spatial_size
-
-
-@pytest.mark.parametrize("p", [0.0, 1.0])
-class TestRandomVerticalFlip:
-    def input_expected_image_tensor(self, p, dtype=torch.float32):
-        input = torch.tensor([[[1, 1], [0, 0]], [[1, 1], [0, 0]]], dtype=dtype)
-        expected = torch.tensor([[[0, 0], [1, 1]], [[0, 0], [1, 1]]], dtype=dtype)
-
-        return input, expected if p == 1 else input
-
-    def test_simple_tensor(self, p):
-        input, expected = self.input_expected_image_tensor(p)
-        transform = transforms.RandomVerticalFlip(p=p)
-
-        actual = transform(input)
-
-        assert_equal(expected, actual)
-
-    def test_pil_image(self, p):
-        input, expected = self.input_expected_image_tensor(p, dtype=torch.uint8)
-        transform = transforms.RandomVerticalFlip(p=p)
-
-        actual = transform(to_pil_image(input))
-
-        assert_equal(expected, pil_to_tensor(actual))
-
-    def test_datapoints_image(self, p):
-        input, expected = self.input_expected_image_tensor(p)
-        transform = transforms.RandomVerticalFlip(p=p)
-
-        actual = transform(datapoints.Image(input))
-
-        assert_equal(datapoints.Image(expected), actual)
-
-    def test_datapoints_mask(self, p):
-        input, expected = self.input_expected_image_tensor(p)
-        transform = transforms.RandomVerticalFlip(p=p)
-
-        actual = transform(datapoints.Mask(input))
-
-        assert_equal(datapoints.Mask(expected), actual)
-
-    def test_datapoints_bounding_box(self, p):
-        input = datapoints.BoundingBox([0, 0, 5, 5], format=datapoints.BoundingBoxFormat.XYXY, spatial_size=(10, 10))
-        transform = transforms.RandomVerticalFlip(p=p)
-
-        actual = transform(input)
-
-        expected_image_tensor = torch.tensor([0, 5, 5, 10]) if p == 1.0 else input
-        expected = datapoints.BoundingBox.wrap_like(input, expected_image_tensor)
-        assert_equal(expected, actual)
-        assert actual.format == expected.format
-        assert actual.spatial_size == expected.spatial_size
-
-
 class TestPad:
     def test_assertions(self):
         with pytest.raises(TypeError, match="Got inappropriate padding arg"):
@@ -721,130 +615,6 @@ def test_boundingbox_spatial_size(self, angle, expand):
         assert out_img.spatial_size == out_bbox.spatial_size
 
 
-class TestRandomAffine:
-    def test_assertions(self):
-        with pytest.raises(ValueError, match="is a single number, it must be positive"):
-            transforms.RandomAffine(-0.7)
-
-        for d in [[-0.7], [-0.7, 0, 0.7]]:
-            with pytest.raises(ValueError, match="degrees should be a sequence of length 2"):
-                transforms.RandomAffine(d)
-
-        with pytest.raises(TypeError, match="Got inappropriate fill arg"):
-            transforms.RandomAffine(12, fill="abc")
-
-        with pytest.raises(TypeError, match="Got inappropriate fill arg"):
-            transforms.RandomAffine(12, fill="abc")
-
-        for kwargs in [
-            {"center": 12},
-            {"translate": 12},
-            {"scale": 12},
-        ]:
-            with pytest.raises(TypeError, match="should be a sequence of length"):
-                transforms.RandomAffine(12, **kwargs)
-
-        for kwargs in [{"center": [1, 2, 3]}, {"translate": [1, 2, 3]}, {"scale": [1, 2, 3]}]:
-            with pytest.raises(ValueError, match="should be a sequence of length"):
-                transforms.RandomAffine(12, **kwargs)
-
-        with pytest.raises(ValueError, match="translation values should be between 0 and 1"):
-            transforms.RandomAffine(12, translate=[-1.0, 2.0])
-
-        with pytest.raises(ValueError, match="scale values should be positive"):
-            transforms.RandomAffine(12, scale=[-1.0, 2.0])
-
-        with pytest.raises(ValueError, match="is a single number, it must be positive"):
-            transforms.RandomAffine(12, shear=-10)
-
-        for s in [[-0.7], [-0.7, 0, 0.7]]:
-            with pytest.raises(ValueError, match="shear should be a sequence of length 2"):
-                transforms.RandomAffine(12, shear=s)
-
-    @pytest.mark.parametrize("degrees", [23, [0, 45], (0, 45)])
-    @pytest.mark.parametrize("translate", [None, [0.1, 0.2]])
-    @pytest.mark.parametrize("scale", [None, [0.7, 1.2]])
-    @pytest.mark.parametrize("shear", [None, 2.0, [5.0, 15.0], [1.0, 2.0, 3.0, 4.0]])
-    def test__get_params(self, degrees, translate, scale, shear, mocker):
-        image = mocker.MagicMock(spec=datapoints.Image)
-        image.num_channels = 3
-        image.spatial_size = (24, 32)
-        h, w = image.spatial_size
-
-        transform = transforms.RandomAffine(degrees, translate=translate, scale=scale, shear=shear)
-        params = transform._get_params([image])
-
-        if not isinstance(degrees, (list, tuple)):
-            assert -degrees <= params["angle"] <= degrees
-        else:
-            assert degrees[0] <= params["angle"] <= degrees[1]
-
-        if translate is not None:
-            w_max = int(round(translate[0] * w))
-            h_max = int(round(translate[1] * h))
-            assert -w_max <= params["translate"][0] <= w_max
-            assert -h_max <= params["translate"][1] <= h_max
-        else:
-            assert params["translate"] == (0, 0)
-
-        if scale is not None:
-            assert scale[0] <= params["scale"] <= scale[1]
-        else:
-            assert params["scale"] == 1.0
-
-        if shear is not None:
-            if isinstance(shear, float):
-                assert -shear <= params["shear"][0] <= shear
-                assert params["shear"][1] == 0.0
-            elif len(shear) == 2:
-                assert shear[0] <= params["shear"][0] <= shear[1]
-                assert params["shear"][1] == 0.0
-            else:
-                assert shear[0] <= params["shear"][0] <= shear[1]
-                assert shear[2] <= params["shear"][1] <= shear[3]
-        else:
-            assert params["shear"] == (0, 0)
-
-    @pytest.mark.parametrize("degrees", [23, [0, 45], (0, 45)])
-    @pytest.mark.parametrize("translate", [None, [0.1, 0.2]])
-    @pytest.mark.parametrize("scale", [None, [0.7, 1.2]])
-    @pytest.mark.parametrize("shear", [None, 2.0, [5.0, 15.0], [1.0, 2.0, 3.0, 4.0]])
-    @pytest.mark.parametrize("fill", [0, [1, 2, 3], (2, 3, 4)])
-    @pytest.mark.parametrize("center", [None, [2.0, 3.0]])
-    def test__transform(self, degrees, translate, scale, shear, fill, center, mocker):
-        interpolation = InterpolationMode.BILINEAR
-        transform = transforms.RandomAffine(
-            degrees,
-            translate=translate,
-            scale=scale,
-            shear=shear,
-            interpolation=interpolation,
-            fill=fill,
-            center=center,
-        )
-
-        if isinstance(degrees, (tuple, list)):
-            assert transform.degrees == [float(degrees[0]), float(degrees[1])]
-        else:
-            assert transform.degrees == [float(-degrees), float(degrees)]
-
-        fn = mocker.patch("torchvision.transforms.v2.functional.affine")
-        inpt = mocker.MagicMock(spec=datapoints.Image)
-        inpt.num_channels = 3
-        inpt.spatial_size = (24, 32)
-
-        # vfdev-5, Feature Request: let's store params as Transform attribute
-        # This could be also helpful for users
-        # Otherwise, we can mock transform._get_params
-        torch.manual_seed(12)
-        _ = transform(inpt)
-        torch.manual_seed(12)
-        params = transform._get_params([inpt])
-
-        fill = transforms._utils._convert_fill_arg(fill)
-        fn.assert_called_once_with(inpt, **params, interpolation=interpolation, fill=fill, center=center)
-
-
 class TestRandomCrop:
     def test_assertions(self):
         with pytest.raises(ValueError, match="Please provide only two dimensions"):