Mirror of https://github.com/facebookresearch/pytorch3d.git (synced 2025-12-20 22:30:35 +08:00)
Texturing API updates
Summary: A fairly big refactor of the texturing API, with some breaking changes to how textures are defined.

Main changes:
- There are now 3 types of texture classes: `TexturesUV`, `TexturesAtlas` and `TexturesVertex`. Each class:
  - has a `sample_textures` function which accepts the `fragments` from rasterization and returns `texels`. This means that the shaders no longer need to know the type of the mesh texture, which resolves several issues people were reporting on GitHub.
  - has a `join_batch` method for joining multiple textures of the same type into a batch.

Reviewed By: gkioxari

Differential Revision: D21067427

fbshipit-source-id: 4b346500a60181e72fdd1b0dd89b5505c7a33926
Committed by: Facebook GitHub Bot
Parent: b73d3d6ed9
Commit: a3932960b3
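For orientation before the diff, a minimal sketch of the new per-vertex texture path (not part of this commit's diff; the single-triangle mesh and constant colors are placeholders for illustration):

```python
import torch
from pytorch3d.renderer import TexturesVertex
from pytorch3d.structures import Meshes

# A single triangle, purely for illustration.
verts = torch.tensor([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
faces = torch.tensor([[0, 1, 2]])

# Per-vertex colors now live in TexturesVertex (previously Textures(verts_rgb=...)).
textures = TexturesVertex(verts_features=torch.ones_like(verts)[None])  # (N, V, C)

mesh = Meshes(verts=[verts], faces=[faces], textures=textures)

# After rasterization, a shader no longer needs to know the texture type:
# mesh.sample_textures(fragments) returns (N, H, W, K, C) texels for any of
# TexturesUV, TexturesAtlas or TexturesVertex.
```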
@@ -13,8 +13,8 @@ from pytorch3d.renderer import (
    OpenGLPerspectiveCameras,
    PointLights,
    RasterizationSettings,
    TexturesVertex,
)
from pytorch3d.structures import Textures


class ShapeNetBase(torch.utils.data.Dataset):
@@ -113,8 +113,8 @@ class ShapeNetBase(torch.utils.data.Dataset):
        """
        paths = self._handle_render_inputs(model_ids, categories, sample_nums, idxs)
        meshes = load_objs_as_meshes(paths, device=device, load_textures=False)
        meshes.textures = Textures(
            verts_rgb=torch.ones_like(meshes.verts_padded(), device=device)
        meshes.textures = TexturesVertex(
            verts_features=torch.ones_like(meshes.verts_padded(), device=device)
        )
        cameras = kwargs.get("cameras", OpenGLPerspectiveCameras()).to(device)
        renderer = MeshRenderer(

@@ -11,7 +11,8 @@ import numpy as np
import torch
from pytorch3d.io.mtl_io import load_mtl, make_mesh_texture_atlas
from pytorch3d.io.utils import _open_file
from pytorch3d.structures import Meshes, Textures, join_meshes_as_batch
from pytorch3d.renderer import TexturesAtlas, TexturesUV
from pytorch3d.structures import Meshes, join_meshes_as_batch


def _make_tensor(data, cols: int, dtype: torch.dtype, device="cpu") -> torch.Tensor:
@@ -41,6 +42,10 @@ def _format_faces_indices(faces_indices, max_index, device, pad_value=None):
    Args:
        faces_indices: List of ints of indices.
        max_index: Max index for the face property.
        pad_value: if any of the face_indices are padded, specify
            the value of the padding (e.g. -1). This is only used
            for texture indices indices where there might
            not be texture information for all the faces.

    Returns:
        faces_indices: List of ints of indices.
@@ -65,7 +70,9 @@ def _format_faces_indices(faces_indices, max_index, device, pad_value=None):
        faces_indices[mask] = pad_value

    # Check indices are valid.
    if torch.any(faces_indices >= max_index) or torch.any(faces_indices < 0):
    if torch.any(faces_indices >= max_index) or (
        pad_value is None and torch.any(faces_indices < 0)
    ):
        warnings.warn("Faces have invalid indices")

    return faces_indices
@@ -227,7 +234,14 @@ def load_obj(
    )


def load_objs_as_meshes(files: list, device=None, load_textures: bool = True):
def load_objs_as_meshes(
    files: list,
    device=None,
    load_textures: bool = True,
    create_texture_atlas: bool = False,
    texture_atlas_size: int = 4,
    texture_wrap: Optional[str] = "repeat",
):
    """
    Load meshes from a list of .obj files using the load_obj function, and
    return them as a Meshes object. This only works for meshes which have a
@@ -246,18 +260,31 @@ def load_objs_as_meshes(files: list, device=None, load_textures: bool = True):
    """
    mesh_list = []
    for f_obj in files:
        # TODO: update this function to support the two texturing options.
        verts, faces, aux = load_obj(f_obj, load_textures=load_textures)
        verts = verts.to(device)
        verts, faces, aux = load_obj(
            f_obj,
            load_textures=load_textures,
            create_texture_atlas=create_texture_atlas,
            texture_atlas_size=texture_atlas_size,
            texture_wrap=texture_wrap,
        )
        tex = None
        tex_maps = aux.texture_images
        if tex_maps is not None and len(tex_maps) > 0:
            verts_uvs = aux.verts_uvs[None, ...].to(device)  # (1, V, 2)
            faces_uvs = faces.textures_idx[None, ...].to(device)  # (1, F, 3)
            image = list(tex_maps.values())[0].to(device)[None]
            tex = Textures(verts_uvs=verts_uvs, faces_uvs=faces_uvs, maps=image)
        if create_texture_atlas:
            # TexturesAtlas type
            tex = TexturesAtlas(atlas=[aux.texture_atlas])
        else:
            # TexturesUV type
            tex_maps = aux.texture_images
            if tex_maps is not None and len(tex_maps) > 0:
                verts_uvs = aux.verts_uvs.to(device)  # (V, 2)
                faces_uvs = faces.textures_idx.to(device)  # (F, 3)
                image = list(tex_maps.values())[0].to(device)[None]
                tex = TexturesUV(
                    verts_uvs=[verts_uvs], faces_uvs=[faces_uvs], maps=image
                )

        mesh = Meshes(verts=[verts], faces=[faces.verts_idx.to(device)], textures=tex)
        mesh = Meshes(
            verts=[verts.to(device)], faces=[faces.verts_idx.to(device)], textures=tex
        )
        mesh_list.append(mesh)
    if len(mesh_list) == 1:
        return mesh_list[0]

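A usage sketch for the extended loader signature above (not part of the diff; "model.obj" is a placeholder path):

```python
from pytorch3d.io import load_objs_as_meshes

# Default path: build a TexturesUV texture from the .obj's uv coordinates and texture map.
mesh_uv = load_objs_as_meshes(["model.obj"], device="cpu")

# Alternative path: bake the materials into a per-face texture atlas (TexturesAtlas).
mesh_atlas = load_objs_as_meshes(
    ["model.obj"],
    device="cpu",
    create_texture_atlas=True,
    texture_atlas_size=4,
    texture_wrap="repeat",
)
```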
@@ -28,11 +28,11 @@ from .mesh import (
    SoftGouraudShader,
    SoftPhongShader,
    SoftSilhouetteShader,
    TexturedSoftPhongShader,
    Textures,
    TexturesAtlas,
    TexturesUV,
    TexturesVertex,
    gouraud_shading,
    interpolate_face_attributes,
    interpolate_texture_map,
    interpolate_vertex_colors,
    phong_shading,
    rasterize_meshes,
)

@@ -1,10 +1,10 @@
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.


from .texturing import interpolate_texture_map, interpolate_vertex_colors  # isort:skip
from .rasterize_meshes import rasterize_meshes
from .rasterizer import MeshRasterizer, RasterizationSettings
from .renderer import MeshRenderer
from .shader import TexturedSoftPhongShader  # DEPRECATED
from .shader import (
    HardFlatShader,
    HardGouraudShader,
@@ -12,10 +12,10 @@ from .shader import (
    SoftGouraudShader,
    SoftPhongShader,
    SoftSilhouetteShader,
    TexturedSoftPhongShader,
)
from .shading import gouraud_shading, phong_shading
from .utils import interpolate_face_attributes
from .textures import Textures  # DEPRECATED
from .textures import TexturesAtlas, TexturesUV, TexturesVertex


__all__ = [k for k in globals().keys() if not k.startswith("_")]

@@ -1,5 +1,6 @@
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.

import warnings

import torch
import torch.nn as nn
@@ -13,7 +14,6 @@ from ..blending import (
from ..lighting import PointLights
from ..materials import Materials
from .shading import flat_shading, gouraud_shading, phong_shading
from .texturing import interpolate_texture_map, interpolate_vertex_colors


# A Shader should take as input fragments from the output of rasterization
@@ -57,7 +57,7 @@ class HardPhongShader(nn.Module):
                or in the forward pass of HardPhongShader"
            raise ValueError(msg)

        texels = interpolate_vertex_colors(fragments, meshes)
        texels = meshes.sample_textures(fragments)
        lights = kwargs.get("lights", self.lights)
        materials = kwargs.get("materials", self.materials)
        blend_params = kwargs.get("blend_params", self.blend_params)
@@ -104,9 +104,11 @@ class SoftPhongShader(nn.Module):
            msg = "Cameras must be specified either at initialization \
                or in the forward pass of SoftPhongShader"
            raise ValueError(msg)
        texels = interpolate_vertex_colors(fragments, meshes)

        texels = meshes.sample_textures(fragments)
        lights = kwargs.get("lights", self.lights)
        materials = kwargs.get("materials", self.materials)
        blend_params = kwargs.get("blend_params", self.blend_params)
        colors = phong_shading(
            meshes=meshes,
            fragments=fragments,
@@ -115,7 +117,7 @@ class SoftPhongShader(nn.Module):
            cameras=cameras,
            materials=materials,
        )
        images = softmax_rgb_blend(colors, fragments, self.blend_params)
        images = softmax_rgb_blend(colors, fragments, blend_params)
        return images

@@ -154,6 +156,12 @@ class HardGouraudShader(nn.Module):
        lights = kwargs.get("lights", self.lights)
        materials = kwargs.get("materials", self.materials)
        blend_params = kwargs.get("blend_params", self.blend_params)

        # As Gouraud shading applies the illumination to the vertex
        # colors, the interpolated pixel texture is calculated in the
        # shading step. In comparison, for Phong shading, the pixel
        # textures are computed first after which the illumination is
        # applied.
        pixel_colors = gouraud_shading(
            meshes=meshes,
            fragments=fragments,
@@ -210,54 +218,25 @@ class SoftGouraudShader(nn.Module):
        return images


class TexturedSoftPhongShader(nn.Module):
def TexturedSoftPhongShader(
    device="cpu", cameras=None, lights=None, materials=None, blend_params=None
):
    """
    Per pixel lighting applied to a texture map. First interpolate the vertex
    uv coordinates and sample from a texture map. Then apply the lighting model
    using the interpolated coords and normals for each pixel.

    The blending function returns the soft aggregated color using all
    the faces per pixel.

    To use the default values, simply initialize the shader with the desired
    device e.g.

    .. code-block::

        shader = TexturedPhongShader(device=torch.device("cuda:0"))
    TexturedSoftPhongShader class has been DEPRECATED. Use SoftPhongShader instead.
    Preserving TexturedSoftPhongShader as a function for backwards compatibility.
    """

    def __init__(
        self, device="cpu", cameras=None, lights=None, materials=None, blend_params=None
    ):
        super().__init__()
        self.lights = lights if lights is not None else PointLights(device=device)
        self.materials = (
            materials if materials is not None else Materials(device=device)
        )
        self.cameras = cameras
        self.blend_params = blend_params if blend_params is not None else BlendParams()

    def forward(self, fragments, meshes, **kwargs) -> torch.Tensor:
        cameras = kwargs.get("cameras", self.cameras)
        if cameras is None:
            msg = "Cameras must be specified either at initialization \
                or in the forward pass of TexturedSoftPhongShader"
            raise ValueError(msg)
        texels = interpolate_texture_map(fragments, meshes)
        lights = kwargs.get("lights", self.lights)
        materials = kwargs.get("materials", self.materials)
        blend_params = kwargs.get("blend_params", self.blend_params)
        colors = phong_shading(
            meshes=meshes,
            fragments=fragments,
            texels=texels,
            lights=lights,
            cameras=cameras,
            materials=materials,
        )
        images = softmax_rgb_blend(colors, fragments, blend_params)
        return images
    warnings.warn(
        """TexturedSoftPhongShader is now deprecated;
        use SoftPhongShader instead.""",
        PendingDeprecationWarning,
    )
    return SoftPhongShader(
        device=device,
        cameras=cameras,
        lights=lights,
        materials=materials,
        blend_params=blend_params,
    )


class HardFlatShader(nn.Module):
@@ -291,7 +270,7 @@ class HardFlatShader(nn.Module):
            msg = "Cameras must be specified either at initialization \
                or in the forward pass of HardFlatShader"
            raise ValueError(msg)
        texels = interpolate_vertex_colors(fragments, meshes)
        texels = meshes.sample_textures(fragments)
        lights = kwargs.get("lights", self.lights)
        materials = kwargs.get("materials", self.materials)
        blend_params = kwargs.get("blend_params", self.blend_params)

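A sketch of the backwards-compatibility behavior introduced above (not part of the diff): the old shader name still works, but it now constructs a SoftPhongShader and emits a PendingDeprecationWarning.

```python
from pytorch3d.renderer import (
    OpenGLPerspectiveCameras,
    SoftPhongShader,
    TexturedSoftPhongShader,
)

shader = TexturedSoftPhongShader(device="cpu", cameras=OpenGLPerspectiveCameras())
assert isinstance(shader, SoftPhongShader)  # the deprecated name returns the new shader
```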
@@ -6,6 +6,8 @@ from typing import Tuple
import torch
from pytorch3d.ops import interpolate_face_attributes

from .textures import TexturesVertex


def _apply_lighting(
    points, normals, lights, cameras, materials
@@ -91,6 +93,9 @@ def gouraud_shading(meshes, fragments, lights, cameras, materials) -> torch.Tensor:
    Then interpolate the vertex shaded colors using the barycentric coordinates
    to get a color per pixel.

    Gouraud shading is only supported for meshes with texture type `TexturesVertex`.
    This is because the illumination is applied to the vertex colors.

    Args:
        meshes: Batch of meshes
        fragments: Fragments named tuple with the outputs of rasterization
@@ -101,10 +106,13 @@ def gouraud_shading(meshes, fragments, lights, cameras, materials) -> torch.Tensor:
    Returns:
        colors: (N, H, W, K, 3)
    """
    if not isinstance(meshes.textures, TexturesVertex):
        raise ValueError("Mesh textures must be an instance of TexturesVertex")

    faces = meshes.faces_packed()  # (F, 3)
    verts = meshes.verts_packed()
    vertex_normals = meshes.verts_normals_packed()  # (V, 3)
    vertex_colors = meshes.textures.verts_rgb_packed()
    verts = meshes.verts_packed()  # (V, 3)
    verts_normals = meshes.verts_normals_packed()  # (V, 3)
    verts_colors = meshes.textures.verts_features_packed()  # (V, D)
    vert_to_mesh_idx = meshes.verts_packed_to_mesh_idx()

    # Format properties of lights and materials so they are compatible
@@ -119,9 +127,10 @@ def gouraud_shading(meshes, fragments, lights, cameras, materials) -> torch.Tensor:

    # Calculate the illumination at each vertex
    ambient, diffuse, specular = _apply_lighting(
        verts, vertex_normals, lights, cameras, materials
        verts, verts_normals, lights, cameras, materials
    )
    verts_colors_shaded = vertex_colors * (ambient + diffuse) + specular

    verts_colors_shaded = verts_colors * (ambient + diffuse) + specular
    face_colors = verts_colors_shaded[faces]
    colors = interpolate_face_attributes(
        fragments.pix_to_face, fragments.bary_coords, face_colors

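A small sketch of the constraint documented above (not part of the diff): Gouraud shading lights per-vertex colors, so it now requires `TexturesVertex`.

```python
from pytorch3d.renderer import TexturesVertex

def can_use_gouraud_shading(meshes) -> bool:
    # Mirrors the isinstance check added to gouraud_shading; meshes carrying
    # TexturesUV or TexturesAtlas textures will raise a ValueError there.
    return isinstance(meshes.textures, TexturesVertex)
```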
pytorch3d/renderer/mesh/textures.py: new file, 1049 lines. File diff suppressed because it is too large.
@@ -1,113 +0,0 @@
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.

from typing import List, Optional, Union

import torch
import torch.nn.functional as F
from pytorch3d.ops import interpolate_face_attributes
from pytorch3d.structures.textures import Textures


def interpolate_texture_map(fragments, meshes) -> torch.Tensor:
    """
    Interpolate a 2D texture map using uv vertex texture coordinates for each
    face in the mesh. First interpolate the vertex uvs using barycentric coordinates
    for each pixel in the rasterized output. Then interpolate the texture map
    using the uv coordinate for each pixel.

    Args:
        fragments:
            The outputs of rasterization. From this we use

            - pix_to_face: LongTensor of shape (N, H, W, K) specifying the indices
              of the faces (in the packed representation) which
              overlap each pixel in the image.
            - barycentric_coords: FloatTensor of shape (N, H, W, K, 3) specifying
              the barycentric coordianates of each pixel
              relative to the faces (in the packed
              representation) which overlap the pixel.
        meshes: Meshes representing a batch of meshes. It is expected that
            meshes has a textures attribute which is an instance of the
            Textures class.

    Returns:
        texels: tensor of shape (N, H, W, K, C) giving the interpolated
            texture for each pixel in the rasterized image.
    """
    if not isinstance(meshes.textures, Textures):
        msg = "Expected meshes.textures to be an instance of Textures; got %r"
        raise ValueError(msg % type(meshes.textures))

    faces_uvs = meshes.textures.faces_uvs_packed()
    verts_uvs = meshes.textures.verts_uvs_packed()
    faces_verts_uvs = verts_uvs[faces_uvs]
    texture_maps = meshes.textures.maps_padded()

    # pixel_uvs: (N, H, W, K, 2)
    pixel_uvs = interpolate_face_attributes(
        fragments.pix_to_face, fragments.bary_coords, faces_verts_uvs
    )

    N, H_out, W_out, K = fragments.pix_to_face.shape
    N, H_in, W_in, C = texture_maps.shape  # 3 for RGB

    # pixel_uvs: (N, H, W, K, 2) -> (N, K, H, W, 2) -> (NK, H, W, 2)
    pixel_uvs = pixel_uvs.permute(0, 3, 1, 2, 4).reshape(N * K, H_out, W_out, 2)

    # textures.map:
    #   (N, H, W, C) -> (N, C, H, W) -> (1, N, C, H, W)
    #   -> expand (K, N, C, H, W) -> reshape (N*K, C, H, W)
    texture_maps = (
        texture_maps.permute(0, 3, 1, 2)[None, ...]
        .expand(K, -1, -1, -1, -1)
        .transpose(0, 1)
        .reshape(N * K, C, H_in, W_in)
    )

    # Textures: (N*K, C, H, W), pixel_uvs: (N*K, H, W, 2)
    # Now need to format the pixel uvs and the texture map correctly!
    # From pytorch docs, grid_sample takes `grid` and `input`:
    #   grid specifies the sampling pixel locations normalized by
    #   the input spatial dimensions It should have most
    #   values in the range of [-1, 1]. Values x = -1, y = -1
    #   is the left-top pixel of input, and values x = 1, y = 1 is the
    #   right-bottom pixel of input.

    pixel_uvs = pixel_uvs * 2.0 - 1.0
    texture_maps = torch.flip(texture_maps, [2])  # flip y axis of the texture map
    if texture_maps.device != pixel_uvs.device:
        texture_maps = texture_maps.to(pixel_uvs.device)
    texels = F.grid_sample(texture_maps, pixel_uvs, align_corners=False)
    texels = texels.reshape(N, K, C, H_out, W_out).permute(0, 3, 4, 1, 2)
    return texels

def interpolate_vertex_colors(fragments, meshes) -> torch.Tensor:
    """
    Detemine the color for each rasterized face. Interpolate the colors for
    vertices which form the face using the barycentric coordinates.
    Args:
        meshes: A Meshes class representing a batch of meshes.
        fragments:
            The outputs of rasterization. From this we use

            - pix_to_face: LongTensor of shape (N, H, W, K) specifying the indices
              of the faces (in the packed representation) which
              overlap each pixel in the image.
            - barycentric_coords: FloatTensor of shape (N, H, W, K, 3) specifying
              the barycentric coordianates of each pixel
              relative to the faces (in the packed
              representation) which overlap the pixel.

    Returns:
        texels: An texture per pixel of shape (N, H, W, K, C).
            There will be one C dimensional value for each element in
            fragments.pix_to_face.
    """
    vertex_textures = meshes.textures.verts_rgb_padded().reshape(-1, 3)  # (V, C)
    vertex_textures = vertex_textures[meshes.verts_padded_to_packed_idx(), :]
    faces_packed = meshes.faces_packed()
    faces_textures = vertex_textures[faces_packed]  # (F, 3, C)
    texels = interpolate_face_attributes(
        fragments.pix_to_face, fragments.bary_coords, faces_textures
    )
    return texels

@@ -2,7 +2,6 @@

from .meshes import Meshes, join_meshes_as_batch
from .pointclouds import Pointclouds
from .textures import Textures
from .utils import list_to_packed, list_to_padded, packed_to_list, padded_to_list

@@ -5,7 +5,6 @@ from typing import List, Union
import torch

from . import utils as struct_utils
from .textures import Textures


class Meshes(object):
@@ -234,9 +233,9 @@ class Meshes(object):
        Refer to comments above for descriptions of List and Padded representations.
        """
        self.device = None
        if textures is not None and not isinstance(textures, Textures):
            msg = "Expected textures to be of type Textures; got %r"
            raise ValueError(msg % type(textures))
        if textures is not None and not repr(textures) == "TexturesBase":
            msg = "Expected textures to be an instance of type TexturesBase; got %r"
            raise ValueError(msg % repr(textures))
        self.textures = textures

        # Indicates whether the meshes in the list/batch have the same number
@@ -400,6 +399,8 @@ class Meshes(object):
        if self.textures is not None:
            self.textures._num_faces_per_mesh = self._num_faces_per_mesh.tolist()
            self.textures._num_verts_per_mesh = self._num_verts_per_mesh.tolist()
            self.textures._N = self._N
            self.textures.valid = self.valid

    def __len__(self):
        return self._N
@@ -1465,6 +1466,17 @@ class Meshes(object):

        return self.__class__(verts=new_verts_list, faces=new_faces_list, textures=tex)

    def sample_textures(self, fragments):
        if self.textures is not None:
            # Pass in faces packed. If the textures are defined per
            # vertex, the face indices are needed in order to interpolate
            # the vertex attributes across the face.
            return self.textures.sample_textures(
                fragments, faces_packed=self.faces_packed()
            )
        else:
            raise ValueError("Meshes does not have textures")


def join_meshes_as_batch(meshes: List[Meshes], include_textures: bool = True):
    """
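A self-contained sketch of the new `Meshes.sample_textures` entry point added above (not part of the diff; the single triangle and image size are arbitrary):

```python
import torch
from pytorch3d.renderer import (
    MeshRasterizer,
    OpenGLPerspectiveCameras,
    RasterizationSettings,
    TexturesVertex,
)
from pytorch3d.structures import Meshes

# A white triangle placed in front of the default camera.
verts = torch.tensor([[-0.5, -0.5, 1.0], [0.5, -0.5, 1.0], [0.0, 0.5, 1.0]])
faces = torch.tensor([[0, 1, 2]])
meshes = Meshes(
    verts=[verts],
    faces=[faces],
    textures=TexturesVertex(verts_features=torch.ones(1, 3, 3)),
)

rasterizer = MeshRasterizer(
    cameras=OpenGLPerspectiveCameras(),
    raster_settings=RasterizationSettings(image_size=64),
)
fragments = rasterizer(meshes)
texels = meshes.sample_textures(fragments)  # (N, H, W, K, C), for any texture type
```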
@@ -1499,44 +1511,14 @@ def join_meshes_as_batch(meshes: List[Meshes], include_textures: bool = True):
        raise ValueError("Inconsistent textures in join_meshes_as_batch.")

    # Now we know there are multiple meshes and they have textures to merge.
    first = meshes[0].textures
    kwargs = {}
    if first.maps_padded() is not None:
        if any(mesh.textures.maps_padded() is None for mesh in meshes):
            raise ValueError("Inconsistent maps_padded in join_meshes_as_batch.")
        maps = [m for mesh in meshes for m in mesh.textures.maps_padded()]
        kwargs["maps"] = maps
    elif any(mesh.textures.maps_padded() is not None for mesh in meshes):
        raise ValueError("Inconsistent maps_padded in join_meshes_as_batch.")
    all_textures = [mesh.textures for mesh in meshes]
    first = all_textures[0]
    tex_types_same = all(type(tex) == type(first) for tex in all_textures)

    if first.verts_uvs_padded() is not None:
        if any(mesh.textures.verts_uvs_padded() is None for mesh in meshes):
            raise ValueError("Inconsistent verts_uvs_padded in join_meshes_as_batch.")
        uvs = [uv for mesh in meshes for uv in mesh.textures.verts_uvs_list()]
        V = max(uv.shape[0] for uv in uvs)
        kwargs["verts_uvs"] = struct_utils.list_to_padded(uvs, (V, 2), -1)
    elif any(mesh.textures.verts_uvs_padded() is not None for mesh in meshes):
        raise ValueError("Inconsistent verts_uvs_padded in join_meshes_as_batch.")
    if not tex_types_same:
        raise ValueError("All meshes in the batch must have the same type of texture.")

    if first.faces_uvs_padded() is not None:
        if any(mesh.textures.faces_uvs_padded() is None for mesh in meshes):
            raise ValueError("Inconsistent faces_uvs_padded in join_meshes_as_batch.")
        uvs = [uv for mesh in meshes for uv in mesh.textures.faces_uvs_list()]
        F = max(uv.shape[0] for uv in uvs)
        kwargs["faces_uvs"] = struct_utils.list_to_padded(uvs, (F, 3), -1)
    elif any(mesh.textures.faces_uvs_padded() is not None for mesh in meshes):
        raise ValueError("Inconsistent faces_uvs_padded in join_meshes_as_batch.")

    if first.verts_rgb_padded() is not None:
        if any(mesh.textures.verts_rgb_padded() is None for mesh in meshes):
            raise ValueError("Inconsistent verts_rgb_padded in join_meshes_as_batch.")
        rgb = [i for mesh in meshes for i in mesh.textures.verts_rgb_list()]
        V = max(i.shape[0] for i in rgb)
        kwargs["verts_rgb"] = struct_utils.list_to_padded(rgb, (V, 3))
    elif any(mesh.textures.verts_rgb_padded() is not None for mesh in meshes):
        raise ValueError("Inconsistent verts_rgb_padded in join_meshes_as_batch.")

    tex = Textures(**kwargs)
    tex = first.join_batch(all_textures[1:])
    return Meshes(verts=verts, faces=faces, textures=tex)

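A sketch of the simplified joining logic above (not part of the diff): textures are now merged by the texture class's own `join_batch`, so every mesh in the batch must carry the same texture type.

```python
import torch
from pytorch3d.renderer import TexturesVertex
from pytorch3d.structures import Meshes, join_meshes_as_batch

def single_triangle_mesh(color):
    # Illustrative helper: one triangle with a constant per-vertex color.
    verts = torch.tensor([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
    faces = torch.tensor([[0, 1, 2]])
    feats = torch.tensor([color] * 3, dtype=torch.float32)[None]  # (1, V, 3)
    return Meshes(verts=[verts], faces=[faces], textures=TexturesVertex(verts_features=feats))

# Same texture type on both meshes: merged via TexturesVertex.join_batch.
batch = join_meshes_as_batch(
    [single_triangle_mesh([1.0, 0.0, 0.0]), single_triangle_mesh([0.0, 1.0, 0.0])]
)
# Mixing texture types (e.g. TexturesVertex with TexturesUV) now raises a ValueError.
```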
@@ -1544,7 +1526,7 @@ def join_mesh(meshes: Union[Meshes, List[Meshes]]) -> Meshes:
    """
    Joins a batch of meshes in the form of a Meshes object or a list of Meshes
    objects as a single mesh. If the input is a list, the Meshes objects in the list
    must all be on the same device. This version ignores all textures in the input mehses.
    must all be on the same device. This version ignores all textures in the input meshes.

    Args:
        meshes: Meshes object that contains a batch of meshes or a list of Meshes objects

@@ -1,279 +0,0 @@
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.

from typing import List, Optional, Union

import torch
from torch.nn.functional import interpolate

from .utils import padded_to_list, padded_to_packed


"""
This file has functions for interpolating textures after rasterization.
"""


def _pad_texture_maps(images: List[torch.Tensor]) -> torch.Tensor:
    """
    Pad all texture images so they have the same height and width.

    Args:
        images: list of N tensors of shape (H, W, 3)

    Returns:
        tex_maps: Tensor of shape (N, max_H, max_W, 3)
    """
    tex_maps = []
    max_H = 0
    max_W = 0
    for im in images:
        h, w, _3 = im.shape
        if h > max_H:
            max_H = h
        if w > max_W:
            max_W = w
        tex_maps.append(im)
    max_shape = (max_H, max_W)

    for i, image in enumerate(tex_maps):
        if image.shape[:2] != max_shape:
            image_BCHW = image.permute(2, 0, 1)[None]
            new_image_BCHW = interpolate(
                image_BCHW, size=max_shape, mode="bilinear", align_corners=False
            )
            tex_maps[i] = new_image_BCHW[0].permute(1, 2, 0)
    tex_maps = torch.stack(tex_maps, dim=0)  # (num_tex_maps, max_H, max_W, 3)
    return tex_maps

def _extend_tensor(input_tensor: torch.Tensor, N: int) -> torch.Tensor:
    """
    Extend a tensor `input_tensor` with ndim > 2, `N` times along the batch
    dimension. This is done in the following sequence of steps (where `B` is
    the batch dimension):

    .. code-block:: python

        input_tensor (B, ...)
        -> add leading empty dimension (1, B, ...)
        -> expand (N, B, ...)
        -> reshape (N * B, ...)

    Args:
        input_tensor: torch.Tensor with ndim > 2 representing a batched input.
        N: number of times to extend each element of the batch.
    """
    # pyre-fixme[16]: `Tensor` has no attribute `ndim`.
    if input_tensor.ndim < 2:
        raise ValueError("Input tensor must have ndimensions >= 2.")
    B = input_tensor.shape[0]
    non_batch_dims = tuple(input_tensor.shape[1:])
    constant_dims = (-1,) * input_tensor.ndim  # these dims are not expanded.
    return (
        input_tensor.clone()[None, ...]
        .expand(N, *constant_dims)
        .transpose(0, 1)
        .reshape(N * B, *non_batch_dims)
    )

class Textures(object):
    def __init__(
        self,
        maps: Union[List, torch.Tensor, None] = None,
        faces_uvs: Optional[torch.Tensor] = None,
        verts_uvs: Optional[torch.Tensor] = None,
        verts_rgb: Optional[torch.Tensor] = None,
    ):
        """
        Args:
            maps: texture map per mesh. This can either be a list of maps
                [(H, W, 3)] or a padded tensor of shape (N, H, W, 3).
            faces_uvs: (N, F, 3) tensor giving the index into verts_uvs for each
                vertex in the face. Padding value is assumed to be -1.
            verts_uvs: (N, V, 2) tensor giving the uv coordinate per vertex.
            verts_rgb: (N, V, 3) tensor giving the rgb color per vertex. Padding
                value is assumed to be -1.

        Note: only the padded representation of the textures is stored
        and the packed/list representations are computed on the fly and
        not cached.
        """
        # pyre-fixme[16]: `Tensor` has no attribute `ndim`.
        if faces_uvs is not None and faces_uvs.ndim != 3:
            msg = "Expected faces_uvs to be of shape (N, F, 3); got %r"
            raise ValueError(msg % repr(faces_uvs.shape))
        if verts_uvs is not None and verts_uvs.ndim != 3:
            msg = "Expected verts_uvs to be of shape (N, V, 2); got %r"
            raise ValueError(msg % repr(verts_uvs.shape))
        if verts_rgb is not None and verts_rgb.ndim != 3:
            msg = "Expected verts_rgb to be of shape (N, V, 3); got %r"
            raise ValueError(msg % repr(verts_rgb.shape))
        if maps is not None:
            # pyre-fixme[16]: `List` has no attribute `ndim`.
            if torch.is_tensor(maps) and maps.ndim != 4:
                msg = "Expected maps to be of shape (N, H, W, 3); got %r"
                # pyre-fixme[16]: `List` has no attribute `shape`.
                raise ValueError(msg % repr(maps.shape))
            elif isinstance(maps, list):
                maps = _pad_texture_maps(maps)
            if faces_uvs is None or verts_uvs is None:
                msg = "To use maps, faces_uvs and verts_uvs are required"
                raise ValueError(msg)

        self._faces_uvs_padded = faces_uvs
        self._verts_uvs_padded = verts_uvs
        self._verts_rgb_padded = verts_rgb
        self._maps_padded = maps

        # The number of faces/verts for each mesh is
        # set inside the Meshes object when textures is
        # passed into the Meshes constructor.
        self._num_faces_per_mesh = None
        self._num_verts_per_mesh = None

    def clone(self):
        other = self.__class__()
        for k in dir(self):
            v = getattr(self, k)
            if torch.is_tensor(v):
                setattr(other, k, v.clone())
        return other

    def to(self, device):
        for k in dir(self):
            v = getattr(self, k)
            if torch.is_tensor(v) and v.device != device:
                setattr(self, k, v.to(device))
        return self

    def __getitem__(self, index):
        other = self.__class__()
        for key in dir(self):
            value = getattr(self, key)
            if torch.is_tensor(value):
                if isinstance(index, int):
                    setattr(other, key, value[index][None])
                else:
                    setattr(other, key, value[index])
        return other

    def faces_uvs_padded(self) -> torch.Tensor:
        # pyre-fixme[7]: Expected `Tensor` but got `Optional[torch.Tensor]`.
        return self._faces_uvs_padded

    def faces_uvs_list(self) -> Union[List[torch.Tensor], None]:
        if self._faces_uvs_padded is None:
            return None
        return padded_to_list(
            # pyre-fixme[6]: Expected `Tensor` for 1st param but got
            # `Optional[torch.Tensor]`.
            self._faces_uvs_padded,
            split_size=self._num_faces_per_mesh,
        )

    def faces_uvs_packed(self) -> Union[torch.Tensor, None]:
        if self._faces_uvs_padded is None:
            return None
        return padded_to_packed(
            # pyre-fixme[6]: Expected `Tensor` for 1st param but got
            # `Optional[torch.Tensor]`.
            self._faces_uvs_padded,
            split_size=self._num_faces_per_mesh,
        )

    def verts_uvs_padded(self) -> Union[torch.Tensor, None]:
        return self._verts_uvs_padded

    def verts_uvs_list(self) -> Union[List[torch.Tensor], None]:
        if self._verts_uvs_padded is None:
            return None
        # Vertices shared between multiple faces
        # may have a different uv coordinate for
        # each face so the num_verts_uvs_per_mesh
        # may be different from num_verts_per_mesh.
        # Therefore don't use any split_size.
        # pyre-fixme[6]: Expected `Tensor` for 1st param but got
        # `Optional[torch.Tensor]`.
        return padded_to_list(self._verts_uvs_padded)

    def verts_uvs_packed(self) -> Union[torch.Tensor, None]:
        if self._verts_uvs_padded is None:
            return None
        # Vertices shared between multiple faces
        # may have a different uv coordinate for
        # each face so the num_verts_uvs_per_mesh
        # may be different from num_verts_per_mesh.
        # Therefore don't use any split_size.
        # pyre-fixme[6]: Expected `Tensor` for 1st param but got
        # `Optional[torch.Tensor]`.
        return padded_to_packed(self._verts_uvs_padded)

    def verts_rgb_padded(self) -> Union[torch.Tensor, None]:
        return self._verts_rgb_padded

    def verts_rgb_list(self) -> Union[List[torch.Tensor], None]:
        if self._verts_rgb_padded is None:
            return None
        return padded_to_list(
            # pyre-fixme[6]: Expected `Tensor` for 1st param but got
            # `Optional[torch.Tensor]`.
            self._verts_rgb_padded,
            split_size=self._num_verts_per_mesh,
        )

    def verts_rgb_packed(self) -> Union[torch.Tensor, None]:
        if self._verts_rgb_padded is None:
            return None
        return padded_to_packed(
            # pyre-fixme[6]: Expected `Tensor` for 1st param but got
            # `Optional[torch.Tensor]`.
            self._verts_rgb_padded,
            split_size=self._num_verts_per_mesh,
        )

    # Currently only the padded maps are used.
    def maps_padded(self) -> Union[torch.Tensor, None]:
        # pyre-fixme[7]: Expected `Optional[torch.Tensor]` but got `Union[None,
        # List[typing.Any], torch.Tensor]`.
        return self._maps_padded

    def extend(self, N: int) -> "Textures":
        """
        Create new Textures class which contains each input texture N times

        Args:
            N: number of new copies of each texture.

        Returns:
            new Textures object.
        """
        if not isinstance(N, int):
            raise ValueError("N must be an integer.")
        if N <= 0:
            raise ValueError("N must be > 0.")

        if all(
            v is not None
            for v in [self._faces_uvs_padded, self._verts_uvs_padded, self._maps_padded]
        ):
            # pyre-fixme[6]: Expected `Tensor` for 1st param but got
            # `Optional[torch.Tensor]`.
            new_verts_uvs = _extend_tensor(self._verts_uvs_padded, N)
            # pyre-fixme[6]: Expected `Tensor` for 1st param but got
            # `Optional[torch.Tensor]`.
            new_faces_uvs = _extend_tensor(self._faces_uvs_padded, N)
            # pyre-fixme[6]: Expected `Tensor` for 1st param but got `Union[None,
            # List[typing.Any], torch.Tensor]`.
            new_maps = _extend_tensor(self._maps_padded, N)
            return self.__class__(
                verts_uvs=new_verts_uvs, faces_uvs=new_faces_uvs, maps=new_maps
            )
        elif self._verts_rgb_padded is not None:
            # pyre-fixme[6]: Expected `Tensor` for 1st param but got
            # `Optional[torch.Tensor]`.
            new_verts_rgb = _extend_tensor(self._verts_rgb_padded, N)
            return self.__class__(verts_rgb=new_verts_rgb)
        else:
            msg = "Either vertex colors or texture maps are required."
            raise ValueError(msg)

@@ -73,6 +73,7 @@ def padded_to_list(x: torch.Tensor, split_size: Union[list, tuple, None] = None):
    # pyre-fixme[16]: `Tensor` has no attribute `ndim`.
    if x.ndim != 3:
        raise ValueError("Supports only 3-dimensional input tensors")

    x_list = list(x.unbind(0))

    if split_size is None: