Skip to content

Commit 005a334

Browse files
Amitav Baruah authored and facebook-github-bot committed
Render PyTorch3d cameras in plotly
Summary: Take in a renderer with camera(s) and render the cameras as wireframes in the corresponding plotly plots Reviewed By: nikhilaravi Differential Revision: D24151706 fbshipit-source-id: f8e86d61f3d991500bafc0533738c79b96bda630
1 parent 0351096 commit 005a334

File tree

2 files changed

+130
-33
lines changed

2 files changed

+130
-33
lines changed

docs/tutorials/utils/camera_visualization.py

Lines changed: 1 addition & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -1,23 +1,8 @@
11
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
22

33
import matplotlib.pyplot as plt
4-
import torch
54
from mpl_toolkits.mplot3d import Axes3D # noqa: F401 unused import
6-
7-
8-
def get_camera_wireframe(scale: float = 0.3):
9-
"""
10-
Returns a wireframe of a 3D line-plot of a camera symbol.
11-
"""
12-
a = 0.5 * torch.tensor([-2, 1.5, 4])
13-
b = 0.5 * torch.tensor([2, 1.5, 4])
14-
c = 0.5 * torch.tensor([-2, -1.5, 4])
15-
d = 0.5 * torch.tensor([2, -1.5, 4])
16-
C = torch.zeros(3)
17-
F = torch.tensor([0, 0, 3])
18-
camera_points = [a, b, d, c, a, C, b, d, C, c, C, F]
19-
lines = torch.stack([x.float() for x in camera_points]) * scale
20-
return lines
5+
from pytorch3d.vis.plotly_vis import get_camera_wireframe
216

227

238
def plot_cameras(ax, cameras, color: str = "blue"):

pytorch3d/vis/plotly_vis.py

Lines changed: 129 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -12,6 +12,23 @@
1212
from pytorch3d.structures import Meshes, Pointclouds, join_meshes_as_scene
1313

1414

15+
def get_camera_wireframe(scale: float = 0.3):
16+
"""
17+
Returns a wireframe of a 3D line-plot of a camera symbol.
18+
"""
19+
a = 0.5 * torch.tensor([-2, 1.5, 4])
20+
up1 = 0.5 * torch.tensor([0, 1.5, 4])
21+
up2 = 0.5 * torch.tensor([0, 2, 4])
22+
b = 0.5 * torch.tensor([2, 1.5, 4])
23+
c = 0.5 * torch.tensor([-2, -1.5, 4])
24+
d = 0.5 * torch.tensor([2, -1.5, 4])
25+
C = torch.zeros(3)
26+
F = torch.tensor([0, 0, 3])
27+
camera_points = [a, up1, up2, up1, b, d, c, a, C, b, d, C, c, C, F]
28+
lines = torch.stack([x.float() for x in camera_points]) * scale
29+
return lines
30+
31+
1532
class AxisArgs(NamedTuple):
1633
showgrid: bool = False
1734
zeroline: bool = False
@@ -33,18 +50,20 @@ class Lighting(NamedTuple):
3350

3451

3552
def plot_scene(
36-
plots: Dict[str, Dict[str, Union[Pointclouds, Meshes]]],
53+
plots: Dict[str, Dict[str, Union[Pointclouds, Meshes, CamerasBase]]],
3754
*,
3855
viewpoint_cameras: Optional[CamerasBase] = None,
3956
ncols: int = 1,
57+
camera_scale: float = 0.3,
4058
pointcloud_max_points: int = 20000,
4159
pointcloud_marker_size: int = 1,
4260
**kwargs,
4361
):
4462
"""
4563
Main function to visualize Meshes and Pointclouds.
46-
Plots input Pointclouds and Meshes data into named subplots,
47-
with named traces based on the dictionary keys.
64+
Plots input Pointclouds, Meshes, and Cameras data into named subplots,
65+
with named traces based on the dictionary keys. Cameras are
66+
rendered at the camera center location using a wireframe.
4867
4968
Args:
5069
plots: A dict containing subplot and trace names,
@@ -57,6 +76,7 @@ def plot_scene(
5776
for all the subplots will be viewed from that point.
5877
Otherwise, the viewpoint_cameras will not be used.
5978
ncols: the number of subplots per row
79+
camera_scale: determines the size of the wireframe used to render cameras.
6080
pointcloud_max_points: the maximum number of points to plot from
6181
a pointcloud. If more are present, a random sample of size
6282
pointcloud_max_points is used.
@@ -84,7 +104,7 @@ def plot_scene(
84104
85105
The above example will render one subplot which has both a mesh and pointcloud.
86106
87-
If the Meshes or Pointclouds objects are batched, then every object in that batch
107+
If the Meshes, Pointclouds, or Cameras objects are batched, then every object in that batch
88108
will be plotted in a single trace.
89109
90110
..code-block::python
@@ -144,6 +164,23 @@ def plot_scene(
144164
The above example will render the first subplot seen from the camera on the +z axis,
145165
and the second subplot from the viewpoint of the camera on the -z axis.
146166
167+
We can visualize these cameras as well:
168+
..code-block::python
169+
mesh = ...
170+
R, T = look_at_view_transform(2.7, 0, [0, 180]) # 2 camera angles, front and back
171+
# Any instance of CamerasBase works, here we use FoVPerspectiveCameras
172+
cameras = FoVPerspectiveCameras(device=device, R=R, T=T)
173+
fig = plot_scene({
174+
"subplot1_title": {
175+
"mesh_trace_title": mesh,
176+
"cameras_trace_title": cameras,
177+
},
178+
})
179+
fig.show()
180+
181+
The above example will render one subplot with the mesh object
182+
and two cameras.
183+
147184
For an example of using kwargs, see below:
148185
..code-block::python
149186
mesh = ...
@@ -227,9 +264,15 @@ def plot_scene(
227264
pointcloud_max_points,
228265
pointcloud_marker_size,
229266
)
267+
elif isinstance(struct, CamerasBase):
268+
_add_camera_trace(
269+
fig, struct, trace_name, subplot_idx, ncols, camera_scale
270+
)
230271
else:
231272
raise ValueError(
232-
"struct {} is not a Meshes or Pointclouds object".format(struct)
273+
"struct {} is not a Cameras, Meshes or Pointclouds object".format(
274+
struct
275+
)
233276
)
234277

235278
# Ensure update for every subplot.
@@ -285,7 +328,9 @@ def plot_scene(
285328

286329

287330
def plot_batch_individually(
288-
batched_structs: Union[List[Union[Meshes, Pointclouds]], Meshes, Pointclouds],
331+
batched_structs: Union[
332+
List[Union[Meshes, Pointclouds, CamerasBase]], Meshes, Pointclouds, CamerasBase
333+
],
289334
*,
290335
viewpoint_cameras: Optional[CamerasBase] = None,
291336
ncols: int = 1,
@@ -295,26 +340,26 @@ def plot_batch_individually(
295340
):
296341
"""
297342
This is a higher level plotting function than plot_scene, for plotting
298-
Meshes and Pointclouds in simple cases. The simplest use is to plot a
299-
single Meshes or Pointclouds object, where you just pass it in as a
343+
Cameras, Meshes and Pointclouds in simple cases. The simplest use is to plot a
344+
single Cameras, Meshes or Pointclouds object, where you just pass it in as a
300345
one element list. This will plot each batch element in a separate subplot.
301346
302-
More generally, you can supply multiple Meshes or Pointclouds
347+
More generally, you can supply multiple Cameras, Meshes or Pointclouds
303348
having the same batch size `n`. In this case, there will be `n` subplots,
304349
each depicting the corresponding batch element of all the inputs.
305350
306-
In addition, you can include Meshes and Pointclouds of size 1 in
351+
In addition, you can include Cameras, Meshes and Pointclouds of size 1 in
307352
the input. These will either be rendered in the first subplot
308353
(if extend_struct is False), or in every subplot.
309354
310355
Args:
311-
batched_structs: a list of Meshes and/or Pointclouds to be rendered.
356+
batched_structs: a list of Cameras, Meshes and/or Pointclouds to be rendered.
312357
Each structure's corresponding batch element will be plotted in
313358
a single subplot, resulting in n subplots for a batch of size n.
314359
Every struct should either have the same batch size or be of batch size 1.
315360
See extend_struct and the description above for how batch size 1 structs
316-
are handled. Also accepts a single Meshes or Pointclouds object, which will have
317-
each individual element plotted in its own subplot.
361+
are handled. Also accepts a single Cameras, Meshes or Pointclouds object,
362+
which will have each individual element plotted in its own subplot.
318363
viewpoint_cameras: an instance of a Cameras object providing a location
319364
to view the plotly plot from. If the batch size is equal
320365
to the number of subplots, it is a one to one mapping.
@@ -408,10 +453,10 @@ def plot_batch_individually(
408453

409454

410455
def _add_struct_from_batch(
411-
batched_struct: Union[Meshes, Pointclouds],
456+
batched_struct: Union[CamerasBase, Meshes, Pointclouds],
412457
scene_num: int,
413458
subplot_title: str,
414-
scene_dictionary: Dict[str, Dict[str, Union[Meshes, Pointclouds]]],
459+
scene_dictionary: Dict[str, Dict[str, Union[CamerasBase, Meshes, Pointclouds]]],
415460
trace_idx: int = 1,
416461
):
417462
"""
@@ -426,8 +471,18 @@ def _add_struct_from_batch(
426471
scene_dictionary: the dictionary to add the indexed struct to
427472
trace_idx: the trace number, starting at 1 for this struct's trace
428473
"""
429-
struct_idx = min(scene_num, len(batched_struct) - 1)
430-
struct = batched_struct[struct_idx]
474+
struct = None
475+
if isinstance(batched_struct, CamerasBase):
476+
# we can't index directly into camera batches
477+
R, T = batched_struct.R, batched_struct.T # pyre-ignore[16]
478+
r_idx = min(scene_num, len(R) - 1)
479+
t_idx = min(scene_num, len(T) - 1)
480+
R = R[r_idx].unsqueeze(0)
481+
T = T[t_idx].unsqueeze(0)
482+
struct = CamerasBase(device=batched_struct.device, R=R, T=T)
483+
else: # batched meshes and pointclouds are indexable
484+
struct_idx = min(scene_num, len(batched_struct) - 1)
485+
struct = batched_struct[struct_idx]
431486
trace_name = "trace{}-{}".format(scene_num + 1, trace_idx)
432487
scene_dictionary[subplot_title][trace_name] = struct
433488

@@ -568,6 +623,63 @@ def _add_pointcloud_trace(
568623
_update_axes_bounds(verts_center, max_expand, current_layout)
569624

570625

626+
def _add_camera_trace(
627+
fig: go.Figure,
628+
cameras: CamerasBase,
629+
trace_name: str,
630+
subplot_idx: int,
631+
ncols: int,
632+
camera_scale: float,
633+
):
634+
"""
635+
Adds a trace rendering a Cameras object to the passed in figure, with
636+
a given name and in a specific subplot.
637+
638+
Args:
639+
fig: plotly figure to add the trace within.
640+
cameras: the Cameras object to render. It can be batched.
641+
trace_name: name to label the trace with.
642+
subplot_idx: identifies the subplot, with 0 being the top left.
643+
ncols: the number of sublpots per row.
644+
camera_scale: the size of the wireframe used to render the Cameras object.
645+
"""
646+
cam_wires = get_camera_wireframe(camera_scale).to(cameras.device)
647+
cam_trans = cameras.get_world_to_view_transform().inverse()
648+
cam_wires_trans = cam_trans.transform_points(cam_wires).detach().cpu()
649+
# if batch size is 1, unsqueeze to add dimension
650+
if len(cam_wires_trans.shape) < 3:
651+
cam_wires_trans = cam_wires_trans.unsqueeze(0)
652+
653+
nan_tensor = torch.Tensor([[float("NaN")] * 3])
654+
all_cam_wires = cam_wires_trans[0]
655+
for wire in cam_wires_trans[1:]:
656+
# We combine camera points into a single tensor to plot them in a
657+
# single trace. The NaNs are inserted between sets of camera
658+
# points so that the lines drawn by Plotly are not drawn between
659+
# points that belong to different cameras.
660+
all_cam_wires = torch.cat((all_cam_wires, nan_tensor, wire))
661+
x, y, z = all_cam_wires.detach().cpu().numpy().T.astype(float)
662+
663+
row, col = subplot_idx // ncols + 1, subplot_idx % ncols + 1
664+
fig.add_trace(
665+
go.Scatter3d( # pyre-ignore [16]
666+
x=x, y=y, z=z, marker={"size": 1}, name=trace_name
667+
),
668+
row=row,
669+
col=col,
670+
)
671+
672+
# Access the current subplot's scene configuration
673+
plot_scene = "scene" + str(subplot_idx + 1)
674+
current_layout = fig["layout"][plot_scene]
675+
676+
# flatten for bounds calculations
677+
flattened_wires = cam_wires_trans.flatten(0, 1)
678+
verts_center = flattened_wires.mean(0)
679+
max_expand = (flattened_wires.max(0)[0] - flattened_wires.min(0)[0]).max()
680+
_update_axes_bounds(verts_center, max_expand, current_layout)
681+
682+
571683
def _gen_fig_with_subplots(batch_size: int, ncols: int, subplot_titles: List[str]):
572684
"""
573685
Takes in the number of objects to be plotted and generate a plotly figure

0 commit comments

Comments
 (0)