Mirror of https://github.com/facebookresearch/pytorch3d.git (synced 2025-08-02 03:42:50 +08:00)

Example and test updates.

Summary: This commit refines the pulsar examples and tests. The examples are fully adjusted to adhere to the PEP style guide, and additional comments are added.

Reviewed By: nikhilaravi
Differential Revision: D24723391
fbshipit-source-id: 6d289006f080140159731e7f3a8c98b582164f1a

This commit is contained in:
parent e9a26f263a
commit b6be3b95fb
@@ -7,49 +7,65 @@ Output: basic.png.
"""
import math
from os import path
import logging

import imageio
import torch
from pytorch3d.renderer.points.pulsar import Renderer


torch.manual_seed(1)
LOGGER = logging.getLogger(__name__)

n_points = 10
width = 1_000
height = 1_000
device = torch.device("cuda")
# The PyTorch3D system is right handed; in pulsar you can choose the handedness.
# For easy reproducibility we use a right handed coordinate system here.
renderer = Renderer(width, height, n_points, right_handed_system=True).to(device)
# Generate sample data.
vert_pos = torch.rand(n_points, 3, dtype=torch.float32, device=device) * 10.0
vert_pos[:, 2] += 25.0
vert_pos[:, :2] -= 5.0
vert_col = torch.rand(n_points, 3, dtype=torch.float32, device=device)
vert_rad = torch.rand(n_points, dtype=torch.float32, device=device)
cam_params = torch.tensor(
[
0.0,
0.0,
0.0, # Position 0, 0, 0 (x, y, z).
0.0,
math.pi, # Because of the right handed system, the camera must look 'back'.
0.0, # Rotation 0, 0, 0 (in axis-angle format).
5.0, # Focal length in world size.
2.0, # Sensor size in world size.
],
dtype=torch.float32,
device=device,
)
# Render.
image = renderer(
vert_pos,
vert_col,
vert_rad,
cam_params,
1.0e-1, # Renderer blending parameter gamma, in [1., 1e-5].
45.0, # Maximum depth.
)
print("Writing image to `%s`." % (path.abspath("basic.png")))
imageio.imsave("basic.png", (image.cpu().detach() * 255.0).to(torch.uint8).numpy())

def cli():
"""
Basic example for the pulsar sphere renderer.

Writes to `basic.png`.
"""
LOGGER.info("Rendering on GPU...")
torch.manual_seed(1)
n_points = 10
width = 1_000
height = 1_000
device = torch.device("cuda")
# The PyTorch3D system is right handed; in pulsar you can choose the handedness.
# For easy reproducibility we use a right handed coordinate system here.
renderer = Renderer(width, height, n_points, right_handed_system=True).to(device)
# Generate sample data.
vert_pos = torch.rand(n_points, 3, dtype=torch.float32, device=device) * 10.0
vert_pos[:, 2] += 25.0
vert_pos[:, :2] -= 5.0
vert_col = torch.rand(n_points, 3, dtype=torch.float32, device=device)
vert_rad = torch.rand(n_points, dtype=torch.float32, device=device)
cam_params = torch.tensor(
[
0.0,
0.0,
0.0, # Position 0, 0, 0 (x, y, z).
0.0,
math.pi, # Because of the right handed system, the camera must look 'back'.
0.0, # Rotation 0, 0, 0 (in axis-angle format).
5.0, # Focal length in world size.
2.0, # Sensor size in world size.
],
dtype=torch.float32,
device=device,
)
# Render.
image = renderer(
vert_pos,
vert_col,
vert_rad,
cam_params,
1.0e-1, # Renderer blending parameter gamma, in [1., 1e-5].
45.0, # Maximum depth.
)
LOGGER.info("Writing image to `%s`.", path.abspath("basic.png"))
imageio.imsave("basic.png", (image.cpu().detach() * 255.0).to(torch.uint8).numpy())
LOGGER.info("Done.")


if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
cli()

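The flat `cam_params` tensor used in both versions above packs the camera position (x, y, z), an axis-angle rotation, the focal length, and the sensor width into eight floats. As a minimal sketch of that layout (the helper name `make_cam_params` is ours, not part of pulsar; it only mirrors the packing shown in the example):

import math

import torch


def make_cam_params(pos, rot, focal_length, sensor_width, device="cuda"):
    # Pack the 8-float camera vector expected by the plain pulsar Renderer:
    # [x, y, z, rx, ry, rz (axis-angle), focal length, sensor width].
    return torch.tensor(
        list(pos) + list(rot) + [focal_length, sensor_width],
        dtype=torch.float32,
        device=device,
    )


# Reproduces the tensor from the example: camera at the origin, rotated by pi
# around the y axis so it looks 'back' in the right handed system.
cam_params = make_cam_params((0.0, 0.0, 0.0), (0.0, math.pi, 0.0), 5.0, 2.0)
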
@@ -6,10 +6,14 @@ interface for sphere renderering. It renders and saves an image with
10 random spheres.
Output: basic-pt3d.png.
"""
import logging
from os import path

import imageio
import torch

# Import `look_at_view_transform` as needed in the suggestion later in the
# example.
from pytorch3d.renderer import PerspectiveCameras # , look_at_view_transform
from pytorch3d.renderer import (
PointsRasterizationSettings,
@@ -19,49 +23,65 @@ from pytorch3d.renderer import (
from pytorch3d.structures import Pointclouds


torch.manual_seed(1)
LOGGER = logging.getLogger(__name__)

n_points = 10
width = 1_000
height = 1_000
device = torch.device("cuda")

# Generate sample data.
vert_pos = torch.rand(n_points, 3, dtype=torch.float32, device=device) * 10.0
vert_pos[:, 2] += 25.0
vert_pos[:, :2] -= 5.0
vert_col = torch.rand(n_points, 3, dtype=torch.float32, device=device)
pcl = Pointclouds(points=vert_pos[None, ...], features=vert_col[None, ...])
# Alternatively, you can also use the look_at_view_transform to get R and T:
# R, T = look_at_view_transform(
#     dist=30.0, elev=0.0, azim=180.0, at=((0.0, 0.0, 30.0),), up=((0, 1, 0),),
# )
cameras = PerspectiveCameras(
# The focal length must be double the size for PyTorch3D because of the NDC
# coordinates spanning a range of two - and they must be normalized by the
# sensor width (see the pulsar example). This means we need here
# 5.0 * 2.0 / 2.0 to get the equivalent results as in pulsar.
focal_length=(5.0 * 2.0 / 2.0,),
R=torch.eye(3, dtype=torch.float32, device=device)[None, ...],
T=torch.zeros((1, 3), dtype=torch.float32, device=device),
image_size=((width, height),),
device=device,
)
vert_rad = torch.rand(n_points, dtype=torch.float32, device=device)
raster_settings = PointsRasterizationSettings(
image_size=(width, height),
radius=vert_rad,
)
rasterizer = PointsRasterizer(cameras=cameras, raster_settings=raster_settings)
renderer = PulsarPointsRenderer(rasterizer=rasterizer).to(device)
# Render.
image = renderer(
pcl,
gamma=(1.0e-1,), # Renderer blending parameter gamma, in [1., 1e-5].
znear=(1.0,),
zfar=(45.0,),
radius_world=True,
bg_col=torch.ones((3,), dtype=torch.float32, device=device),
)[0]
print("Writing image to `%s`." % (path.abspath("basic-pt3d.png")))
imageio.imsave("basic-pt3d.png", (image.cpu().detach() * 255.0).to(torch.uint8).numpy())
def cli():
"""
Basic example for the pulsar sphere renderer using the PyTorch3D interface.

Writes to `basic-pt3d.png`.
"""
LOGGER.info("Rendering on GPU...")
torch.manual_seed(1)
n_points = 10
width = 1_000
height = 1_000
device = torch.device("cuda")
# Generate sample data.
vert_pos = torch.rand(n_points, 3, dtype=torch.float32, device=device) * 10.0
vert_pos[:, 2] += 25.0
vert_pos[:, :2] -= 5.0
vert_col = torch.rand(n_points, 3, dtype=torch.float32, device=device)
pcl = Pointclouds(points=vert_pos[None, ...], features=vert_col[None, ...])
# Alternatively, you can also use the look_at_view_transform to get R and T:
# R, T = look_at_view_transform(
#     dist=30.0, elev=0.0, azim=180.0, at=((0.0, 0.0, 30.0),), up=((0, 1, 0),),
# )
cameras = PerspectiveCameras(
# The focal length must be double the size for PyTorch3D because of the NDC
# coordinates spanning a range of two - and they must be normalized by the
# sensor width (see the pulsar example). This means we need here
# 5.0 * 2.0 / 2.0 to get the equivalent results as in pulsar.
focal_length=(5.0 * 2.0 / 2.0,),
R=torch.eye(3, dtype=torch.float32, device=device)[None, ...],
T=torch.zeros((1, 3), dtype=torch.float32, device=device),
image_size=((width, height),),
device=device,
)
vert_rad = torch.rand(n_points, dtype=torch.float32, device=device)
raster_settings = PointsRasterizationSettings(
image_size=(width, height),
radius=vert_rad,
)
rasterizer = PointsRasterizer(cameras=cameras, raster_settings=raster_settings)
renderer = PulsarPointsRenderer(rasterizer=rasterizer).to(device)
# Render.
image = renderer(
pcl,
gamma=(1.0e-1,), # Renderer blending parameter gamma, in [1., 1e-5].
znear=(1.0,),
zfar=(45.0,),
radius_world=True,
bg_col=torch.ones((3,), dtype=torch.float32, device=device),
)[0]
LOGGER.info("Writing image to `%s`.", path.abspath("basic-pt3d.png"))
imageio.imsave(
"basic-pt3d.png", (image.cpu().detach() * 255.0).to(torch.uint8).numpy()
)
LOGGER.info("Done.")


if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
cli()

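The focal-length comment above is the main difference between the plain pulsar interface and the PyTorch3D `PerspectiveCameras`: pulsar takes a focal length and sensor width in world units, while the NDC focal length spans a range of two and is normalized by the sensor width. A minimal sketch of that conversion, assuming the comment's formula is all that is needed (the helper name is ours):

def pulsar_focal_to_ndc(focal_length_world, sensor_width_world):
    # NDC coordinates span a range of two, and the focal length is normalized
    # by the sensor width, so pulsar's (5.0, 2.0) becomes 5.0 * 2.0 / 2.0 = 5.0.
    return focal_length_world * 2.0 / sensor_width_world


assert pulsar_focal_to_ndc(5.0, 2.0) == 5.0  # Matches focal_length=(5.0 * 2.0 / 2.0,).
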
@@ -9,6 +9,7 @@ distorted. Gradient-based optimization is used to converge towards the
original camera parameters.
Output: cam.gif.
"""
import logging
import math
from os import path

@@ -21,10 +22,11 @@ from pytorch3d.transforms import axis_angle_to_matrix, matrix_to_rotation_6d
from torch import nn, optim


n_points = 20
width = 1_000
height = 1_000
device = torch.device("cuda")
LOGGER = logging.getLogger(__name__)
N_POINTS = 20
WIDTH = 1_000
HEIGHT = 1_000
DEVICE = torch.device("cuda")


class SceneModel(nn.Module):
@@ -45,20 +47,20 @@ class SceneModel(nn.Module):
self.gamma = 0.1
# Points.
torch.manual_seed(1)
vert_pos = torch.rand(n_points, 3, dtype=torch.float32) * 10.0
vert_pos = torch.rand(N_POINTS, 3, dtype=torch.float32) * 10.0
vert_pos[:, 2] += 25.0
vert_pos[:, :2] -= 5.0
self.register_parameter("vert_pos", nn.Parameter(vert_pos, requires_grad=False))
self.register_parameter(
"vert_col",
nn.Parameter(
torch.rand(n_points, 3, dtype=torch.float32), requires_grad=False
torch.rand(N_POINTS, 3, dtype=torch.float32), requires_grad=False
),
)
self.register_parameter(
"vert_rad",
nn.Parameter(
torch.rand(n_points, dtype=torch.float32), requires_grad=False
torch.rand(N_POINTS, dtype=torch.float32), requires_grad=False
),
)
self.register_parameter(
@@ -90,7 +92,7 @@ class SceneModel(nn.Module):
torch.tensor([4.8, 1.8], dtype=torch.float32), requires_grad=True
),
)
self.renderer = Renderer(width, height, n_points, right_handed_system=True)
self.renderer = Renderer(WIDTH, HEIGHT, N_POINTS, right_handed_system=True)

def forward(self):
return self.renderer.forward(
@@ -103,58 +105,71 @@ class SceneModel(nn.Module):
)


# Load reference.
ref = (
torch.from_numpy(
imageio.imread(
"../../tests/pulsar/reference/examples_TestRenderer_test_cam.png"
)[:, ::-1, :].copy()
).to(torch.float32)
/ 255.0
).to(device)
# Set up model.
model = SceneModel().to(device)
# Optimizer.
optimizer = optim.SGD(
[
{"params": [model.cam_pos], "lr": 1e-4}, # 1e-3
{"params": [model.cam_rot], "lr": 5e-6},
{"params": [model.cam_sensor], "lr": 1e-4},
]
)
def cli():
"""
Camera optimization example using pulsar.

print("Writing video to `%s`." % (path.abspath("cam.gif")))
writer = imageio.get_writer("cam.gif", format="gif", fps=25)

# Optimize.
for i in range(300):
optimizer.zero_grad()
result = model()
# Visualize.
result_im = (result.cpu().detach().numpy() * 255).astype(np.uint8)
cv2.imshow("opt", result_im[:, :, ::-1])
writer.append_data(result_im)
overlay_img = np.ascontiguousarray(
((result * 0.5 + ref * 0.5).cpu().detach().numpy() * 255).astype(np.uint8)[
:, :, ::-1
Writes to `cam.gif`.
"""
LOGGER.info("Loading reference...")
# Load reference.
ref = (
torch.from_numpy(
imageio.imread(
"../../tests/pulsar/reference/examples_TestRenderer_test_cam.png"
)[:, ::-1, :].copy()
).to(torch.float32)
/ 255.0
).to(DEVICE)
# Set up model.
model = SceneModel().to(DEVICE)
# Optimizer.
optimizer = optim.SGD(
[
{"params": [model.cam_pos], "lr": 1e-4}, # 1e-3
{"params": [model.cam_rot], "lr": 5e-6},
{"params": [model.cam_sensor], "lr": 1e-4},
]
)
overlay_img = cv2.putText(
overlay_img,
"Step %d" % (i),
(10, 40),
cv2.FONT_HERSHEY_SIMPLEX,
1,
(0, 0, 0),
2,
cv2.LINE_AA,
False,
)
cv2.imshow("overlay", overlay_img)
cv2.waitKey(1)
# Update.
loss = ((result - ref) ** 2).sum()
print("loss {}: {}".format(i, loss.item()))
loss.backward()
optimizer.step()
writer.close()

LOGGER.info("Writing video to `%s`.", path.abspath("cam.gif"))
writer = imageio.get_writer("cam.gif", format="gif", fps=25)

# Optimize.
for i in range(300):
optimizer.zero_grad()
result = model()
# Visualize.
result_im = (result.cpu().detach().numpy() * 255).astype(np.uint8)
cv2.imshow("opt", result_im[:, :, ::-1])
writer.append_data(result_im)
overlay_img = np.ascontiguousarray(
((result * 0.5 + ref * 0.5).cpu().detach().numpy() * 255).astype(np.uint8)[
:, :, ::-1
]
)
overlay_img = cv2.putText(
overlay_img,
"Step %d" % (i),
(10, 40),
cv2.FONT_HERSHEY_SIMPLEX,
1,
(0, 0, 0),
2,
cv2.LINE_AA,
False,
)
cv2.imshow("overlay", overlay_img)
cv2.waitKey(1)
# Update.
loss = ((result - ref) ** 2).sum()
LOGGER.info("loss %d: %f", i, loss.item())
loss.backward()
optimizer.step()
writer.close()
LOGGER.info("Done.")


if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
cli()

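The hunk above imports `axis_angle_to_matrix` and `matrix_to_rotation_6d` from `pytorch3d.transforms` for the camera-rotation parameterization. A minimal sketch of how the two conversions relate (the example values here are made up for illustration):

import torch
from pytorch3d.transforms import axis_angle_to_matrix, matrix_to_rotation_6d

# A small rotation about the y axis in axis-angle form (angle times unit axis).
axis_angle = torch.tensor([[0.0, 0.1, 0.0]])
rot_matrix = axis_angle_to_matrix(axis_angle)  # Shape (1, 3, 3).
# A continuous 6D rotation representation, convenient for gradient-based fitting.
rot_6d = matrix_to_rotation_6d(rot_matrix)  # Shape (1, 6).
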
@@ -10,11 +10,15 @@ original camera parameters.
Output: cam-pt3d.gif
"""
from os import path
import logging

import cv2
import imageio
import numpy as np
import torch

# Import `look_at_view_transform` as needed in the suggestion later in the
# example.
from pytorch3d.renderer.cameras import PerspectiveCameras # , look_at_view_transform
from pytorch3d.renderer.points import (
PointsRasterizationSettings,
@@ -26,10 +30,11 @@ from pytorch3d.transforms import axis_angle_to_matrix
from torch import nn, optim


n_points = 20
width = 1_000
height = 1_000
device = torch.device("cuda")
LOGGER = logging.getLogger(__name__)
N_POINTS = 20
WIDTH = 1_000
HEIGHT = 1_000
DEVICE = torch.device("cuda")


class SceneModel(nn.Module):
@@ -50,21 +55,21 @@ class SceneModel(nn.Module):
self.gamma = 0.1
# Points.
torch.manual_seed(1)
vert_pos = torch.rand(n_points, 3, dtype=torch.float32) * 10.0
vert_pos = torch.rand(N_POINTS, 3, dtype=torch.float32) * 10.0
vert_pos[:, 2] += 25.0
vert_pos[:, :2] -= 5.0
self.register_parameter("vert_pos", nn.Parameter(vert_pos, requires_grad=False))
self.register_parameter(
"vert_col",
nn.Parameter(
torch.rand(n_points, 3, dtype=torch.float32),
torch.rand(N_POINTS, 3, dtype=torch.float32),
requires_grad=False,
),
)
self.register_parameter(
"vert_rad",
nn.Parameter(
torch.rand(n_points, dtype=torch.float32),
torch.rand(N_POINTS, dtype=torch.float32),
requires_grad=False,
),
)
@@ -118,11 +123,11 @@ class SceneModel(nn.Module):
focal_length=self.focal_length,
R=self.cam_rot[None, ...],
T=self.cam_pos[None, ...],
image_size=((width, height),),
device=device,
image_size=((WIDTH, HEIGHT),),
device=DEVICE,
)
raster_settings = PointsRasterizationSettings(
image_size=(width, height),
image_size=(WIDTH, HEIGHT),
radius=self.vert_rad,
)
rasterizer = PointsRasterizer(
@@ -142,7 +147,7 @@ class SceneModel(nn.Module):
zfar=(45.0,),
znear=(1.0,),
radius_world=True,
bg_col=torch.ones((3,), dtype=torch.float32, device=device),
bg_col=torch.ones((3,), dtype=torch.float32, device=DEVICE),
# As mentioned above: workaround for device placement of gradients for
# camera parameters.
focal_length=self.focal_length,
@@ -151,60 +156,73 @@ class SceneModel(nn.Module):
)[0]


# Load reference.
ref = (
torch.from_numpy(
imageio.imread(
"../../tests/pulsar/reference/examples_TestRenderer_test_cam.png"
)[:, ::-1, :].copy()
).to(torch.float32)
/ 255.0
).to(device)
# Set up model.
model = SceneModel().to(device)
# Optimizer.
optimizer = optim.SGD(
[
{"params": [model.cam_pos], "lr": 1e-4},
{"params": [model.cam_rot], "lr": 5e-6},
# Using a higher lr for the focal length here, because
# the sensor width can not be optimized directly.
{"params": [model.focal_length], "lr": 1e-3},
]
)
def cli():
"""
Camera optimization example using pulsar.

print("Writing video to `%s`." % (path.abspath("cam-pt3d.gif")))
writer = imageio.get_writer("cam-pt3d.gif", format="gif", fps=25)

# Optimize.
for i in range(300):
optimizer.zero_grad()
result = model()
# Visualize.
result_im = (result.cpu().detach().numpy() * 255).astype(np.uint8)
cv2.imshow("opt", result_im[:, :, ::-1])
writer.append_data(result_im)
overlay_img = np.ascontiguousarray(
((result * 0.5 + ref * 0.5).cpu().detach().numpy() * 255).astype(np.uint8)[
:, :, ::-1
Writes to `cam.gif`.
"""
LOGGER.info("Loading reference...")
# Load reference.
ref = (
torch.from_numpy(
imageio.imread(
"../../tests/pulsar/reference/examples_TestRenderer_test_cam.png"
)[:, ::-1, :].copy()
).to(torch.float32)
/ 255.0
).to(DEVICE)
# Set up model.
model = SceneModel().to(DEVICE)
# Optimizer.
optimizer = optim.SGD(
[
{"params": [model.cam_pos], "lr": 1e-4},
{"params": [model.cam_rot], "lr": 5e-6},
# Using a higher lr for the focal length here, because
# the sensor width can not be optimized directly.
{"params": [model.focal_length], "lr": 1e-3},
]
)
overlay_img = cv2.putText(
overlay_img,
"Step %d" % (i),
(10, 40),
cv2.FONT_HERSHEY_SIMPLEX,
1,
(0, 0, 0),
2,
cv2.LINE_AA,
False,
)
cv2.imshow("overlay", overlay_img)
cv2.waitKey(1)
# Update.
loss = ((result - ref) ** 2).sum()
print("loss {}: {}".format(i, loss.item()))
loss.backward()
optimizer.step()
writer.close()

LOGGER.info("Writing video to `%s`.", path.abspath("cam-pt3d.gif"))
writer = imageio.get_writer("cam-pt3d.gif", format="gif", fps=25)

# Optimize.
for i in range(300):
optimizer.zero_grad()
result = model()
# Visualize.
result_im = (result.cpu().detach().numpy() * 255).astype(np.uint8)
cv2.imshow("opt", result_im[:, :, ::-1])
writer.append_data(result_im)
overlay_img = np.ascontiguousarray(
((result * 0.5 + ref * 0.5).cpu().detach().numpy() * 255).astype(np.uint8)[
:, :, ::-1
]
)
overlay_img = cv2.putText(
overlay_img,
"Step %d" % (i),
(10, 40),
cv2.FONT_HERSHEY_SIMPLEX,
1,
(0, 0, 0),
2,
cv2.LINE_AA,
False,
)
cv2.imshow("overlay", overlay_img)
cv2.waitKey(1)
# Update.
loss = ((result - ref) ** 2).sum()
LOGGER.info("loss %d: %f", i, loss.item())
loss.backward()
optimizer.step()
writer.close()
LOGGER.info("Done.")


if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
cli()

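Both PyTorch3D-interface camera examples keep a commented-out `look_at_view_transform` suggestion instead of passing `R` and `T` directly. A minimal sketch of that alternative, using exactly the arguments from the comment (untested here; the focal length follows the examples above):

import torch
from pytorch3d.renderer import PerspectiveCameras, look_at_view_transform

# Build R and T from an eye position 30 units away, looking at (0, 0, 30).
R, T = look_at_view_transform(
    dist=30.0, elev=0.0, azim=180.0, at=((0.0, 0.0, 30.0),), up=((0, 1, 0),),
)
cameras = PerspectiveCameras(
    focal_length=(5.0,), R=R, T=T, image_size=((1_000, 1_000),),
)
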
@@ -3,7 +3,8 @@
"""
This example demonstrates multiview 3D reconstruction using the plain
pulsar interface. For this, reference images have been pre-generated
(you can find them at `../../tests/pulsar/reference/examples_TestRenderer_test_multiview_%d.png`).
(you can find them at
`../../tests/pulsar/reference/examples_TestRenderer_test_multiview_%d.png`).
The camera parameters are assumed given. The scene is initialized with
random spheres. Gradient-based optimization is used to optimize sphere
parameters and prune spheres to converge to a 3D representation.
@@ -14,6 +15,7 @@ structures yet.
"""
import math
from os import path
import logging

import cv2
import imageio
@@ -23,11 +25,12 @@ from pytorch3d.renderer.points.pulsar import Renderer
from torch import nn, optim


n_points = 400_000
width = 1_000
height = 1_000
visualize_ids = [0, 1]
device = torch.device("cuda")
LOGGER = logging.getLogger(__name__)
N_POINTS = 400_000
WIDTH = 1_000
HEIGHT = 1_000
VISUALIZE_IDS = [0, 1]
DEVICE = torch.device("cuda")


class SceneModel(nn.Module):
@@ -50,27 +53,27 @@ class SceneModel(nn.Module):
self.gamma = 1.0
# Points.
torch.manual_seed(1)
vert_pos = torch.rand((1, n_points, 3), dtype=torch.float32) * 10.0
vert_pos = torch.rand((1, N_POINTS, 3), dtype=torch.float32) * 10.0
vert_pos[:, :, 2] += 25.0
vert_pos[:, :, :2] -= 5.0
self.register_parameter("vert_pos", nn.Parameter(vert_pos, requires_grad=True))
self.register_parameter(
"vert_col",
nn.Parameter(
torch.ones(1, n_points, 3, dtype=torch.float32) * 0.5,
torch.ones(1, N_POINTS, 3, dtype=torch.float32) * 0.5,
requires_grad=True,
),
)
self.register_parameter(
"vert_rad",
nn.Parameter(
torch.ones(1, n_points, dtype=torch.float32) * 0.05, requires_grad=True
torch.ones(1, N_POINTS, dtype=torch.float32) * 0.05, requires_grad=True
),
)
self.register_parameter(
"vert_opy",
nn.Parameter(
torch.ones(1, n_points, dtype=torch.float32), requires_grad=True
torch.ones(1, N_POINTS, dtype=torch.float32), requires_grad=True
),
)
self.register_buffer(
@@ -92,7 +95,7 @@ class SceneModel(nn.Module):
dtype=torch.float32,
),
)
self.renderer = Renderer(width, height, n_points, right_handed_system=True)
self.renderer = Renderer(WIDTH, HEIGHT, N_POINTS, right_handed_system=True)

def forward(self, cam=None):
if cam is None:
@@ -110,97 +113,113 @@ class SceneModel(nn.Module):
)


# Load reference.
ref = torch.stack(
[
torch.from_numpy(
imageio.imread(
"../../tests/pulsar/reference/examples_TestRenderer_test_multiview_%d.png"
% idx
)
).to(torch.float32)
/ 255.0
for idx in range(8)
]
).to(device)
# Set up model.
model = SceneModel().to(device)
# Optimizer.
optimizer = optim.SGD(
[
{"params": [model.vert_col], "lr": 1e-1},
{"params": [model.vert_rad], "lr": 1e-3},
{"params": [model.vert_pos], "lr": 1e-3},
]
)
def cli():
"""
Simple demonstration for a multi-view 3D reconstruction using pulsar.

# For visualization.
angle = 0.0
print("Writing video to `%s`." % (path.abspath("multiview.avi")))
writer = imageio.get_writer("multiview.gif", format="gif", fps=25)
This example makes use of opacity, which is not yet supported through
the unified PyTorch3D interface.

# Optimize.
for i in range(300):
optimizer.zero_grad()
result = model()
# Visualize.
result_im = (result.cpu().detach().numpy() * 255).astype(np.uint8)
cv2.imshow("opt", result_im[0, :, :, ::-1])
overlay_img = np.ascontiguousarray(
((result * 0.5 + ref * 0.5).cpu().detach().numpy() * 255).astype(np.uint8)[
0, :, :, ::-1
Writes to `multiview.gif`.
"""
LOGGER.info("Loading reference...")
# Load reference.
ref = torch.stack(
[
torch.from_numpy(
imageio.imread(
"../../tests/pulsar/reference/examples_TestRenderer_test_multiview_%d.png"
% idx
)
).to(torch.float32)
/ 255.0
for idx in range(8)
]
).to(DEVICE)
# Set up model.
model = SceneModel().to(DEVICE)
# Optimizer.
optimizer = optim.SGD(
[
{"params": [model.vert_col], "lr": 1e-1},
{"params": [model.vert_rad], "lr": 1e-3},
{"params": [model.vert_pos], "lr": 1e-3},
]
)
overlay_img = cv2.putText(
overlay_img,
"Step %d" % (i),
(10, 40),
cv2.FONT_HERSHEY_SIMPLEX,
1,
(0, 0, 0),
2,
cv2.LINE_AA,
False,
)
cv2.imshow("overlay", overlay_img)
cv2.waitKey(1)
# Update.
loss = ((result - ref) ** 2).sum()
print("loss {}: {}".format(i, loss.item()))
loss.backward()
optimizer.step()
# Cleanup.
with torch.no_grad():
model.vert_col.data = torch.clamp(model.vert_col.data, 0.0, 1.0)
# Remove points.
model.vert_pos.data[model.vert_rad < 0.001, :] = -1000.0
model.vert_rad.data[model.vert_rad < 0.001] = 0.0001
vd = (
(model.vert_col - torch.ones(1, 1, 3, dtype=torch.float32).to(device))
.abs()
.sum(dim=2)
)
model.vert_pos.data[vd <= 0.2] = -1000.0
# Rotating visualization.
cam_control = torch.tensor(
[
[
np.sin(angle) * 35.0,
0.0,
30.0 - np.cos(angle) * 35.0,
0.0,
-angle + math.pi,
0.0,
5.0,
2.0,
]
],
dtype=torch.float32,
).to(device)
with torch.no_grad():
result = model.forward(cam=cam_control)[0]

# For visualization.
angle = 0.0
LOGGER.info("Writing video to `%s`.", path.abspath("multiview.avi"))
writer = imageio.get_writer("multiview.gif", format="gif", fps=25)

# Optimize.
for i in range(300):
optimizer.zero_grad()
result = model()
# Visualize.
result_im = (result.cpu().detach().numpy() * 255).astype(np.uint8)
cv2.imshow("vis", result_im[:, :, ::-1])
writer.append_data(result_im)
angle += 0.05
writer.close()
cv2.imshow("opt", result_im[0, :, :, ::-1])
overlay_img = np.ascontiguousarray(
((result * 0.5 + ref * 0.5).cpu().detach().numpy() * 255).astype(np.uint8)[
0, :, :, ::-1
]
)
overlay_img = cv2.putText(
overlay_img,
"Step %d" % (i),
(10, 40),
cv2.FONT_HERSHEY_SIMPLEX,
1,
(0, 0, 0),
2,
cv2.LINE_AA,
False,
)
cv2.imshow("overlay", overlay_img)
cv2.waitKey(1)
# Update.
loss = ((result - ref) ** 2).sum()
LOGGER.info("loss %d: %f", i, loss.item())
loss.backward()
optimizer.step()
# Cleanup.
with torch.no_grad():
model.vert_col.data = torch.clamp(model.vert_col.data, 0.0, 1.0)
# Remove points.
model.vert_pos.data[model.vert_rad < 0.001, :] = -1000.0
model.vert_rad.data[model.vert_rad < 0.001] = 0.0001
vd = (
(model.vert_col - torch.ones(1, 1, 3, dtype=torch.float32).to(DEVICE))
.abs()
.sum(dim=2)
)
model.vert_pos.data[vd <= 0.2] = -1000.0
# Rotating visualization.
cam_control = torch.tensor(
[
[
np.sin(angle) * 35.0,
0.0,
30.0 - np.cos(angle) * 35.0,
0.0,
-angle + math.pi,
0.0,
5.0,
2.0,
]
],
dtype=torch.float32,
).to(DEVICE)
with torch.no_grad():
result = model.forward(cam=cam_control)[0]
result_im = (result.cpu().detach().numpy() * 255).astype(np.uint8)
cv2.imshow("vis", result_im[:, :, ::-1])
writer.append_data(result_im)
angle += 0.05
writer.close()
LOGGER.info("Done.")


if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
cli()

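The `# Cleanup.` block in the loop above prunes spheres between optimization steps. Pulled out as a standalone sketch (the function name is ours; the thresholds are copied from the example, which stores positions as (1, N, 3), colors as (1, N, 3), and radii as (1, N)):

import torch


@torch.no_grad()
def prune_spheres(vert_pos, vert_col, vert_rad):
    # Keep colors in a valid range.
    vert_col.data = torch.clamp(vert_col.data, 0.0, 1.0)
    # Spheres that shrank to (almost) nothing: park them far away with a tiny
    # radius so they no longer contribute to the rendering.
    small = vert_rad < 0.001
    vert_pos.data[small, :] = -1000.0
    vert_rad.data[small] = 0.0001
    # Spheres that converged to the white background color: park them as well.
    vd = (vert_col - torch.ones(1, 1, 3, device=vert_col.device)).abs().sum(dim=2)
    vert_pos.data[vd <= 0.2] = -1000.0
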
@@ -9,6 +9,7 @@ optimization is used to converge towards a faithful
scene representation.
"""
import math
import logging

import cv2
import imageio
@@ -18,10 +19,11 @@ from pytorch3d.renderer.points.pulsar import Renderer
from torch import nn, optim


n_points = 10_000
width = 1_000
height = 1_000
device = torch.device("cuda")
LOGGER = logging.getLogger(__name__)
N_POINTS = 10_000
WIDTH = 1_000
HEIGHT = 1_000
DEVICE = torch.device("cuda")


class SceneModel(nn.Module):
@@ -42,20 +44,20 @@ class SceneModel(nn.Module):
self.gamma = 1.0
# Points.
torch.manual_seed(1)
vert_pos = torch.rand(n_points, 3, dtype=torch.float32) * 10.0
vert_pos = torch.rand(N_POINTS, 3, dtype=torch.float32) * 10.0
vert_pos[:, 2] += 25.0
vert_pos[:, :2] -= 5.0
self.register_parameter("vert_pos", nn.Parameter(vert_pos, requires_grad=True))
self.register_parameter(
"vert_col",
nn.Parameter(
torch.ones(n_points, 3, dtype=torch.float32) * 0.5, requires_grad=True
torch.ones(N_POINTS, 3, dtype=torch.float32) * 0.5, requires_grad=True
),
)
self.register_parameter(
"vert_rad",
nn.Parameter(
torch.ones(n_points, dtype=torch.float32) * 0.3, requires_grad=True
torch.ones(N_POINTS, dtype=torch.float32) * 0.3, requires_grad=True
),
)
self.register_buffer(
@@ -67,7 +69,7 @@ class SceneModel(nn.Module):
# The volumetric optimization works better with a higher number of tracked
# intersections per ray.
self.renderer = Renderer(
width, height, n_points, n_track=32, right_handed_system=True
WIDTH, HEIGHT, N_POINTS, n_track=32, right_handed_system=True
)

def forward(self):
@@ -82,65 +84,76 @@ class SceneModel(nn.Module):
)


# Load reference.
ref = (
torch.from_numpy(
imageio.imread(
"../../tests/pulsar/reference/examples_TestRenderer_test_smallopt.png"
)[:, ::-1, :].copy()
).to(torch.float32)
/ 255.0
).to(device)
# Set up model.
model = SceneModel().to(device)
# Optimizer.
optimizer = optim.SGD(
[
{"params": [model.vert_col], "lr": 1e0},
{"params": [model.vert_rad], "lr": 5e-3},
{"params": [model.vert_pos], "lr": 1e-2},
]
)

# Optimize.
for i in range(500):
optimizer.zero_grad()
result, result_info = model()
# Visualize.
result_im = (result.cpu().detach().numpy() * 255).astype(np.uint8)
cv2.imshow("opt", result_im[:, :, ::-1])
overlay_img = np.ascontiguousarray(
((result * 0.5 + ref * 0.5).cpu().detach().numpy() * 255).astype(np.uint8)[
:, :, ::-1
def cli():
"""
Scene optimization example using pulsar.
"""
LOGGER.info("Loading reference...")
# Load reference.
ref = (
torch.from_numpy(
imageio.imread(
"../../tests/pulsar/reference/examples_TestRenderer_test_smallopt.png"
)[:, ::-1, :].copy()
).to(torch.float32)
/ 255.0
).to(DEVICE)
# Set up model.
model = SceneModel().to(DEVICE)
# Optimizer.
optimizer = optim.SGD(
[
{"params": [model.vert_col], "lr": 1e0},
{"params": [model.vert_rad], "lr": 5e-3},
{"params": [model.vert_pos], "lr": 1e-2},
]
)
overlay_img = cv2.putText(
overlay_img,
"Step %d" % (i),
(10, 40),
cv2.FONT_HERSHEY_SIMPLEX,
1,
(0, 0, 0),
2,
cv2.LINE_AA,
False,
)
cv2.imshow("overlay", overlay_img)
cv2.waitKey(1)
# Update.
loss = ((result - ref) ** 2).sum()
print("loss {}: {}".format(i, loss.item()))
loss.backward()
optimizer.step()
# Cleanup.
with torch.no_grad():
model.vert_col.data = torch.clamp(model.vert_col.data, 0.0, 1.0)
# Remove points.
model.vert_pos.data[model.vert_rad < 0.001, :] = -1000.0
model.vert_rad.data[model.vert_rad < 0.001] = 0.0001
vd = (
(model.vert_col - torch.ones(3, dtype=torch.float32).to(device))
.abs()
.sum(dim=1)
LOGGER.info("Optimizing...")
# Optimize.
for i in range(500):
optimizer.zero_grad()
result, result_info = model()
# Visualize.
result_im = (result.cpu().detach().numpy() * 255).astype(np.uint8)
cv2.imshow("opt", result_im[:, :, ::-1])
overlay_img = np.ascontiguousarray(
((result * 0.5 + ref * 0.5).cpu().detach().numpy() * 255).astype(np.uint8)[
:, :, ::-1
]
)
model.vert_pos.data[vd <= 0.2] = -1000.0
overlay_img = cv2.putText(
overlay_img,
"Step %d" % (i),
(10, 40),
cv2.FONT_HERSHEY_SIMPLEX,
1,
(0, 0, 0),
2,
cv2.LINE_AA,
False,
)
cv2.imshow("overlay", overlay_img)
cv2.waitKey(1)
# Update.
loss = ((result - ref) ** 2).sum()
LOGGER.info("loss %d: %f", i, loss.item())
loss.backward()
optimizer.step()
# Cleanup.
with torch.no_grad():
model.vert_col.data = torch.clamp(model.vert_col.data, 0.0, 1.0)
# Remove points.
model.vert_pos.data[model.vert_rad < 0.001, :] = -1000.0
model.vert_rad.data[model.vert_rad < 0.001] = 0.0001
vd = (
(model.vert_col - torch.ones(3, dtype=torch.float32).to(DEVICE))
.abs()
.sum(dim=1)
)
model.vert_pos.data[vd <= 0.2] = -1000.0
LOGGER.info("Done.")


if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
cli()

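Relative to the earlier examples, the renderer here is constructed with `n_track=32`, per the comment that the volumetric optimization works better with more tracked intersections per ray. A minimal sketch of just that construction, with the sizes taken from the example above:

from pytorch3d.renderer.points.pulsar import Renderer

# Track more sphere intersections per ray than in the basic examples; this
# mirrors the call inside SceneModel above.
renderer = Renderer(1_000, 1_000, 10_000, n_track=32, right_handed_system=True)
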
@@ -9,11 +9,15 @@ optimization is used to converge towards a faithful
scene representation.
"""
import math
import logging

import cv2
import imageio
import numpy as np
import torch

# Import `look_at_view_transform` as needed in the suggestion later in the
# example.
from pytorch3d.renderer.cameras import PerspectiveCameras # , look_at_view_transform
from pytorch3d.renderer.points import (
PointsRasterizationSettings,
@@ -24,10 +28,11 @@ from pytorch3d.structures.pointclouds import Pointclouds
from torch import nn, optim


n_points = 10_000
width = 1_000
height = 1_000
device = torch.device("cuda")
LOGGER = logging.getLogger(__name__)
N_POINTS = 10_000
WIDTH = 1_000
HEIGHT = 1_000
DEVICE = torch.device("cuda")


class SceneModel(nn.Module):
@@ -48,21 +53,21 @@ class SceneModel(nn.Module):
self.gamma = 1.0
# Points.
torch.manual_seed(1)
vert_pos = torch.rand(n_points, 3, dtype=torch.float32, device=device) * 10.0
vert_pos = torch.rand(N_POINTS, 3, dtype=torch.float32, device=DEVICE) * 10.0
vert_pos[:, 2] += 25.0
vert_pos[:, :2] -= 5.0
self.register_parameter("vert_pos", nn.Parameter(vert_pos, requires_grad=True))
self.register_parameter(
"vert_col",
nn.Parameter(
torch.ones(n_points, 3, dtype=torch.float32, device=device) * 0.5,
torch.ones(N_POINTS, 3, dtype=torch.float32, device=DEVICE) * 0.5,
requires_grad=True,
),
)
self.register_parameter(
"vert_rad",
nn.Parameter(
torch.ones(n_points, dtype=torch.float32) * 0.3, requires_grad=True
torch.ones(N_POINTS, dtype=torch.float32) * 0.3, requires_grad=True
),
)
self.register_buffer(
@@ -77,13 +82,13 @@ class SceneModel(nn.Module):
# sensor width (see the pulsar example). This means we need here
# 5.0 * 2.0 / 2.0 to get the equivalent results as in pulsar.
focal_length=5.0,
R=torch.eye(3, dtype=torch.float32, device=device)[None, ...],
T=torch.zeros((1, 3), dtype=torch.float32, device=device),
image_size=((width, height),),
device=device,
R=torch.eye(3, dtype=torch.float32, device=DEVICE)[None, ...],
T=torch.zeros((1, 3), dtype=torch.float32, device=DEVICE),
image_size=((WIDTH, HEIGHT),),
device=DEVICE,
)
raster_settings = PointsRasterizationSettings(
image_size=(width, height),
image_size=(WIDTH, HEIGHT),
radius=self.vert_rad,
)
rasterizer = PointsRasterizer(
@@ -103,69 +108,80 @@ class SceneModel(nn.Module):
zfar=(45.0,),
znear=(1.0,),
radius_world=True,
bg_col=torch.ones((3,), dtype=torch.float32, device=device),
bg_col=torch.ones((3,), dtype=torch.float32, device=DEVICE),
)[0]


# Load reference.
ref = (
torch.from_numpy(
imageio.imread(
"../../tests/pulsar/reference/examples_TestRenderer_test_smallopt.png"
)[:, ::-1, :].copy()
).to(torch.float32)
/ 255.0
).to(device)
# Set up model.
model = SceneModel().to(device)
# Optimizer.
optimizer = optim.SGD(
[
{"params": [model.vert_col], "lr": 1e0},
{"params": [model.vert_rad], "lr": 5e-3},
{"params": [model.vert_pos], "lr": 1e-2},
]
)

# Optimize.
for i in range(500):
optimizer.zero_grad()
result = model()
# Visualize.
result_im = (result.cpu().detach().numpy() * 255).astype(np.uint8)
cv2.imshow("opt", result_im[:, :, ::-1])
overlay_img = np.ascontiguousarray(
((result * 0.5 + ref * 0.5).cpu().detach().numpy() * 255).astype(np.uint8)[
:, :, ::-1
def cli():
"""
Scene optimization example using pulsar and the unified PyTorch3D interface.
"""
LOGGER.info("Loading reference...")
# Load reference.
ref = (
torch.from_numpy(
imageio.imread(
"../../tests/pulsar/reference/examples_TestRenderer_test_smallopt.png"
)[:, ::-1, :].copy()
).to(torch.float32)
/ 255.0
).to(DEVICE)
# Set up model.
model = SceneModel().to(DEVICE)
# Optimizer.
optimizer = optim.SGD(
[
{"params": [model.vert_col], "lr": 1e0},
{"params": [model.vert_rad], "lr": 5e-3},
{"params": [model.vert_pos], "lr": 1e-2},
]
)
overlay_img = cv2.putText(
overlay_img,
"Step %d" % (i),
(10, 40),
cv2.FONT_HERSHEY_SIMPLEX,
1,
(0, 0, 0),
2,
cv2.LINE_AA,
False,
)
cv2.imshow("overlay", overlay_img)
cv2.waitKey(1)
# Update.
loss = ((result - ref) ** 2).sum()
print("loss {}: {}".format(i, loss.item()))
loss.backward()
optimizer.step()
# Cleanup.
with torch.no_grad():
model.vert_col.data = torch.clamp(model.vert_col.data, 0.0, 1.0)
# Remove points.
model.vert_pos.data[model.vert_rad < 0.001, :] = -1000.0
model.vert_rad.data[model.vert_rad < 0.001] = 0.0001
vd = (
(model.vert_col - torch.ones(3, dtype=torch.float32).to(device))
.abs()
.sum(dim=1)
LOGGER.info("Optimizing...")
# Optimize.
for i in range(500):
optimizer.zero_grad()
result = model()
# Visualize.
result_im = (result.cpu().detach().numpy() * 255).astype(np.uint8)
cv2.imshow("opt", result_im[:, :, ::-1])
overlay_img = np.ascontiguousarray(
((result * 0.5 + ref * 0.5).cpu().detach().numpy() * 255).astype(np.uint8)[
:, :, ::-1
]
)
model.vert_pos.data[vd <= 0.2] = -1000.0
overlay_img = cv2.putText(
overlay_img,
"Step %d" % (i),
(10, 40),
cv2.FONT_HERSHEY_SIMPLEX,
1,
(0, 0, 0),
2,
cv2.LINE_AA,
False,
)
cv2.imshow("overlay", overlay_img)
cv2.waitKey(1)
# Update.
loss = ((result - ref) ** 2).sum()
LOGGER.info("loss %d: %f", i, loss.item())
loss.backward()
optimizer.step()
# Cleanup.
with torch.no_grad():
model.vert_col.data = torch.clamp(model.vert_col.data, 0.0, 1.0)
# Remove points.
model.vert_pos.data[model.vert_rad < 0.001, :] = -1000.0
model.vert_rad.data[model.vert_rad < 0.001] = 0.0001
vd = (
(model.vert_col - torch.ones(3, dtype=torch.float32).to(DEVICE))
.abs()
.sum(dim=1)
)
model.vert_pos.data[vd <= 0.2] = -1000.0
LOGGER.info("Done.")


if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
cli()

@@ -44,6 +44,8 @@ class TestDepth(TestCaseMixin, unittest.TestCase):
n_channels=1,
).to(device)
data = torch.load(IN_REF_FP, map_location="cpu")
# For creating the reference files.
# Use in case of updates.
# data["pos"] = torch.rand_like(data["pos"])
# data["pos"][:, 0] = data["pos"][:, 0] * 2. - 1.
# data["pos"][:, 1] = data["pos"][:, 1] * 2. - 1.
@@ -74,6 +76,8 @@ class TestDepth(TestCaseMixin, unittest.TestCase):
),
depth_vis.cpu().numpy().astype(np.uint8),
)
# For creating the reference files.
# Use in case of updates.
# torch.save(
#     data, path.join(path.dirname(__file__), "reference", "nr0000-in.pth")
# )

@@ -123,7 +123,7 @@ class TestSmallSpheres(unittest.TestCase):
self.assertTrue(
(sphere_ids == idx).sum() > 0, "Sphere ID %d missing!" % (idx)
)
# Visualize.
# Visualization code. Activate for debugging.
# result_im = (result.cpu().detach().numpy() * 255).astype(np.uint8)
# cv2.imshow("res", result_im[0, :, :, ::-1])
# cv2.waitKey(0)