@@ -5,6 +5,8 @@
 import torch
 from torchvision._utils import StrEnum
 from torchvision.transforms import InterpolationMode
+from torchvision.transforms.functional import _get_inverse_affine_matrix
+from torchvision.transforms.functional_tensor import _compute_output_size
 
 from ._feature import _Feature
 
@@ -168,10 +170,18 @@ def rotate(
         output = _F.rotate_bounding_box(
             self, format=self.format, image_size=self.image_size, angle=angle, expand=expand, center=center
         )
-        # TODO: update output image size if expand is True
+        image_size = self.image_size
         if expand:
-            raise RuntimeError("Not yet implemented")
-        return BoundingBox.new_like(self, output, dtype=output.dtype)
+            # Recomputing image_size here is not optimal: it redundantly recomputes
+            # - the rotation matrix (_get_inverse_affine_matrix)
+            # - the corner-points matrix product (_compute_output_size)
+            # Alternatively, _F.rotate_bounding_box could return the new image size.
+            height, width = image_size
+            rotation_matrix = _get_inverse_affine_matrix([0.0, 0.0], angle, [0.0, 0.0], 1.0, [0.0, 0.0])
+            new_width, new_height = _compute_output_size(rotation_matrix, width, height)
+            image_size = (new_height, new_width)
+
+        return BoundingBox.new_like(self, output, dtype=output.dtype, image_size=image_size)
 
     def affine(
         self,
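
For context on what the new `expand` branch computes: with `expand=True` the output canvas grows so the rotated content still fits, and `image_size` must track that. Below is a minimal sketch of the underlying geometry, rotating the four image corners about the center and taking their extent. The helper name `expanded_rotation_size` is illustrative only, not a torchvision API; the real `_compute_output_size` works from the inverse affine matrix and applies tolerance-based rounding rather than a plain `round()`.

```python
import math


def expanded_rotation_size(width, height, angle_degrees):
    # Rotate the four corners of a width x height image about its center and
    # take the extent of the rotated points; this is the geometry behind the
    # expand=True output size (hypothetical helper, not a torchvision API).
    theta = math.radians(angle_degrees)
    cos_t, sin_t = math.cos(theta), math.sin(theta)
    half_w, half_h = width / 2.0, height / 2.0
    corners = [(-half_w, -half_h), (half_w, -half_h), (half_w, half_h), (-half_w, half_h)]
    xs = [x * cos_t - y * sin_t for x, y in corners]
    ys = [x * sin_t + y * cos_t for x, y in corners]
    return int(round(max(xs) - min(xs))), int(round(max(ys) - min(ys)))


# A 90-degree rotation of a 100x50 image swaps the dimensions:
print(expanded_rotation_size(100, 50, 90.0))  # -> (50, 100)
```

Note that `_compute_output_size` returns `(new_width, new_height)` while `image_size` follows the `(height, width)` convention, hence the swap to `(new_height, new_width)` before `new_like` is called.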
|