Skip to content

Commit cff4876

Browse files
bottler authored and facebook-github-bot committed
add from_ndc to unproject_points
Summary: Give unproject_points an argument letting it bypass screen space. Use it to let the raysampler work for cameras defined in screen space.

Reviewed By: gkioxari

Differential Revision: D32596600

fbshipit-source-id: 2fe585dcd138cdbc65dd1c70e1957fd894512d3d
1 parent a0e2d2e commit cff4876

File tree

2 files changed

+39
-3
lines changed

2 files changed

+39
-3
lines changed

pytorch3d/renderer/cameras.py

Lines changed: 38 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -129,6 +129,10 @@ def unproject_points(self):
129129
coordinates using the camera extrinsics `R` and `T`.
130130
`False` ignores `R` and `T` and unprojects to
131131
the camera view coordinates.
132+
from_ndc: If `False` (default), assumes xy part of input is in
133+
NDC space if self.in_ndc(), otherwise in screen space. If
134+
`True`, assumes xy is in NDC space even if the camera
135+
is defined in screen space.
132136
133137
Returns
134138
new_points: unprojected points with the same shape as `xy_depth`.
@@ -998,12 +1002,27 @@ def get_projection_transform(self, **kwargs) -> Transform3d:
9981002
return transform
9991003

10001004
def unproject_points(
1001-
self, xy_depth: torch.Tensor, world_coordinates: bool = True, **kwargs
1005+
self,
1006+
xy_depth: torch.Tensor,
1007+
world_coordinates: bool = True,
1008+
from_ndc: bool = False,
1009+
**kwargs
10021010
) -> torch.Tensor:
1011+
"""
1012+
Args:
1013+
from_ndc: If `False` (default), assumes xy part of input is in
1014+
NDC space if self.in_ndc(), otherwise in screen space. If
1015+
`True`, assumes xy is in NDC space even if the camera
1016+
is defined in screen space.
1017+
"""
10031018
if world_coordinates:
10041019
to_camera_transform = self.get_full_projection_transform(**kwargs)
10051020
else:
10061021
to_camera_transform = self.get_projection_transform(**kwargs)
1022+
if from_ndc:
1023+
to_camera_transform = to_camera_transform.compose(
1024+
self.get_ndc_camera_transform()
1025+
)
10071026

10081027
unprojection_transform = to_camera_transform.inverse()
10091028
xy_inv_depth = torch.cat(
@@ -1030,6 +1049,8 @@ def get_ndc_camera_transform(self, **kwargs) -> Transform3d:
10301049
For cameras defined in screen space, we adjust the principal point computation
10311050
which is defined in the image space (commonly) and scale the points to NDC space.
10321051
1052+
This transform leaves the depth unchanged.
1053+
10331054
Important: This transforms assumes PyTorch3D conventions for the input points,
10341055
i.e. +X left, +Y up.
10351056
"""
@@ -1199,12 +1220,27 @@ def get_projection_transform(self, **kwargs) -> Transform3d:
11991220
return transform
12001221

12011222
def unproject_points(
1202-
self, xy_depth: torch.Tensor, world_coordinates: bool = True, **kwargs
1223+
self,
1224+
xy_depth: torch.Tensor,
1225+
world_coordinates: bool = True,
1226+
from_ndc: bool = False,
1227+
**kwargs
12031228
) -> torch.Tensor:
1229+
"""
1230+
Args:
1231+
from_ndc: If `False` (default), assumes xy part of input is in
1232+
NDC space if self.in_ndc(), otherwise in screen space. If
1233+
`True`, assumes xy is in NDC space even if the camera
1234+
is defined in screen space.
1235+
"""
12041236
if world_coordinates:
12051237
to_camera_transform = self.get_full_projection_transform(**kwargs)
12061238
else:
12071239
to_camera_transform = self.get_projection_transform(**kwargs)
1240+
if from_ndc:
1241+
to_camera_transform = to_camera_transform.compose(
1242+
self.get_ndc_camera_transform()
1243+
)
12081244

12091245
unprojection_transform = to_camera_transform.inverse()
12101246
return unprojection_transform.transform_points(xy_depth)

pytorch3d/renderer/implicit/raysampling.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -312,7 +312,7 @@ def _xy_to_ray_bundle(
312312
)
313313

314314
# unproject the points
315-
unprojected = cameras.unproject_points(to_unproject) # pyre-ignore
315+
unprojected = cameras.unproject_points(to_unproject, from_ndc=True) # pyre-ignore
316316

317317
# split the two planes back
318318
rays_plane_1_world = unprojected[:, :n_rays_per_image]

0 commit comments

Comments
 (0)