# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import itertools
import warnings
from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union
import torch
import torch.nn.functional as F
from pytorch3d.ops import interpolate_face_attributes
from pytorch3d.structures.utils import list_to_packed, list_to_padded, padded_to_list
from torch.nn.functional import interpolate
from .utils import pack_rectangles
# This file contains classes and helper functions for texturing.
# There are three types of textures: TexturesVertex, TexturesAtlas
# and TexturesUV which inherit from a base textures class TexturesBase.
#
# Each texture class has a method 'sample_textures' to sample a
# value given barycentric coordinates.
#
# All the textures accept either list or padded inputs. The values
# are stored either per face (TexturesAtlas), as a texture map with
# per face vertex uv coordinates (TexturesUV), or as per vertex
# features (TexturesVertex).
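#
# A rough usage sketch (the tensors named below, e.g. `rgb`, `image`,
# `faces_uvs`, `verts_uvs` and `atlas`, are hypothetical placeholders,
# not values defined in this module):
#
#   tex_vertex = TexturesVertex(verts_features=[rgb])  # rgb: (V, 3)
#   tex_uv = TexturesUV(
#       maps=[image], faces_uvs=[faces_uvs], verts_uvs=[verts_uvs]
#   )  # image: (H, W, 3), faces_uvs: (F, 3), verts_uvs: (V', 2)
#   tex_atlas = TexturesAtlas(atlas=[atlas])  # atlas: (F, R, R, 3)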
def _list_to_padded_wrapper(
x: List[torch.Tensor],
pad_size: Union[list, tuple, None] = None,
pad_value: float = 0.0,
) -> torch.Tensor:
r"""
This is a wrapper function for
pytorch3d.structures.utils.list_to_padded function which only accepts
3-dimensional inputs.
    For this use case, the input x is of shape (F, 3, ...) where only F
    is different for each element in the list.
Transforms a list of N tensors each of shape (Mi, ...) into a single tensor
of shape (N, pad_size, ...), or (N, max(Mi), ...)
if pad_size is None.
Args:
x: list of Tensors
pad_size: int specifying the size of the first dimension
of the padded tensor
pad_value: float value to be used to fill the padded tensor
Returns:
x_padded: tensor consisting of padded input tensors
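
    Example (shapes only): two tensors of shape (2, 3, 4) and (5, 3, 4)
    are padded into a single tensor of shape (2, 5, 3, 4).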
"""
N = len(x)
dims = x[0].ndim
reshape_dims = x[0].shape[1:]
D = torch.prod(torch.tensor(reshape_dims)).item()
x_reshaped = []
for y in x:
        if y.ndim != dims or y.shape[1:] != reshape_dims:
            msg = (
                "list_to_padded requires tensors to have the same number of "
                "dimensions and the same trailing shape"
            )
            raise ValueError(msg)
x_reshaped.append(y.reshape(-1, D))
x_padded = list_to_padded(x_reshaped, pad_size=pad_size, pad_value=pad_value)
return x_padded.reshape((N, -1) + reshape_dims)
def _padded_to_list_wrapper(
x: torch.Tensor, split_size: Union[list, tuple, None] = None
) -> List[torch.Tensor]:
r"""
This is a wrapper function for pytorch3d.structures.utils.padded_to_list
which only accepts 3-dimensional inputs.
For this use case, the input x is of shape (N, F, ...) where F
is the number of faces which is different for each tensor in the batch.
This function transforms a padded tensor of shape (N, M, ...) into a
    list of N tensors of shape (Mi, ...) where Mi is given by
    split_size[i], or of shape (M, ...) if split_size is None.
Args:
x: padded Tensor
split_size: list of ints defining the number of items for each tensor
in the output list.
Returns:
x_list: a list of tensors
"""
N, M = x.shape[:2]
reshape_dims = x.shape[2:]
D = torch.prod(torch.tensor(reshape_dims)).item()
x_reshaped = x.reshape(N, M, D)
x_list = padded_to_list(x_reshaped, split_size=split_size)
x_list = [xl.reshape((xl.shape[0],) + reshape_dims) for xl in x_list]
return x_list
def _pad_texture_maps(
images: Union[Tuple[torch.Tensor], List[torch.Tensor]], align_corners: bool
) -> torch.Tensor:
"""
Pad all texture images so they have the same height and width.
Args:
images: list of N tensors of shape (H_i, W_i, 3)
        align_corners: value of align_corners passed to interpolate
            when resizing the maps to the same size.
Returns:
tex_maps: Tensor of shape (N, max_H, max_W, 3)
"""
tex_maps = []
max_H = 0
max_W = 0
for im in images:
h, w, _3 = im.shape
if h > max_H:
max_H = h
if w > max_W:
max_W = w
tex_maps.append(im)
max_shape = (max_H, max_W)
for i, image in enumerate(tex_maps):
if image.shape[:2] != max_shape:
image_BCHW = image.permute(2, 0, 1)[None]
new_image_BCHW = interpolate(
image_BCHW, size=max_shape, mode="bilinear", align_corners=align_corners
)
tex_maps[i] = new_image_BCHW[0].permute(1, 2, 0)
tex_maps = torch.stack(tex_maps, dim=0) # (num_tex_maps, max_H, max_W, 3)
return tex_maps
# A base class for defining a batch of textures
# with helper methods.
# This is also useful to have so that inside `Meshes`
# we can allow the input textures to be any texture
# type which is an instance of the base class.
class TexturesBase:
def __init__(self):
self._N = 0
self.valid = None
def isempty(self):
if self._N is not None and self.valid is not None:
return self._N == 0 or self.valid.eq(False).all()
return False
def to(self, device):
for k in dir(self):
v = getattr(self, k)
if isinstance(v, (list, tuple)) and all(
torch.is_tensor(elem) for elem in v
):
v = [elem.to(device) for elem in v]
setattr(self, k, v)
if torch.is_tensor(v) and v.device != device:
setattr(self, k, v.to(device))
return self
def _extend(self, N: int, props: List[str]) -> Dict[str, Union[torch.Tensor, List]]:
"""
Create a dict with the specified properties
repeated N times per batch element.
Args:
N: number of new copies of each texture
in the batch.
props: a List of strings which refer to either
class attributes or class methods which
return tensors or lists.
Returns:
Dict with the same keys as props. The values are the
extended properties.
"""
if not isinstance(N, int):
raise ValueError("N must be an integer.")
if N <= 0:
raise ValueError("N must be > 0.")
new_props = {}
for p in props:
t = getattr(self, p)
if callable(t):
t = t() # class method
if isinstance(t, list):
if not all(isinstance(elem, (int, float)) for elem in t):
raise ValueError("Extend only supports lists of scalars")
t = [[ti] * N for ti in t]
new_props[p] = list(itertools.chain(*t))
elif torch.is_tensor(t):
new_props[p] = t.repeat_interleave(N, dim=0)
return new_props
def _getitem(self, index: Union[int, slice], props: List[str]):
"""
Helper function for __getitem__
"""
new_props = {}
if isinstance(index, (int, slice)):
for p in props:
t = getattr(self, p)
if callable(t):
t = t() # class method
new_props[p] = t[index]
elif isinstance(index, list):
index = torch.tensor(index)
if isinstance(index, torch.Tensor):
if index.dtype == torch.bool:
index = index.nonzero()
index = index.squeeze(1) if index.numel() > 0 else index
index = index.tolist()
for p in props:
t = getattr(self, p)
if callable(t):
t = t() # class method
new_props[p] = [t[i] for i in index]
return new_props
def sample_textures(self):
"""
Different texture classes sample textures in different ways
e.g. for vertex textures, the values at each vertex
are interpolated across the face using the barycentric
coordinates.
Each texture class should implement a sample_textures
method to take the `fragments` from rasterization.
Using `fragments.pix_to_face` and `fragments.bary_coords`
this function should return the sampled texture values for
each pixel in the output image.
"""
raise NotImplementedError()
def clone(self):
"""
Each texture class should implement a method
to clone all necessary internal tensors.
"""
raise NotImplementedError()
def detach(self):
"""
Each texture class should implement a method
to detach all necessary internal tensors.
"""
raise NotImplementedError()
def __getitem__(self, index):
"""
Each texture class should implement a method
        to get the texture properties for the
        specified elements in the batch.
        The TexturesBase._getitem(i) method
        can be used as a helper function to retrieve the
class attributes for item i. Then, a new
instance of the child class can be created with
the attributes.
"""
raise NotImplementedError()
def Textures(
maps: Union[List, torch.Tensor, None] = None,
faces_uvs: Optional[torch.Tensor] = None,
verts_uvs: Optional[torch.Tensor] = None,
verts_rgb: Optional[torch.Tensor] = None,
) -> TexturesBase:
"""
Textures class has been DEPRECATED.
Preserving Textures as a function for backwards compatibility.
Args:
maps: texture map per mesh. This can either be a list of maps
[(H, W, 3)] or a padded tensor of shape (N, H, W, 3).
faces_uvs: (N, F, 3) tensor giving the index into verts_uvs for each
vertex in the face. Padding value is assumed to be -1.
verts_uvs: (N, V, 2) tensor giving the uv coordinate per vertex.
verts_rgb: (N, V, 3) tensor giving the rgb color per vertex. Padding
value is assumed to be -1.
Returns:
a Textures class which is an instance of TexturesBase e.g. TexturesUV,
        TexturesAtlas, TexturesVertex.
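
    Example (a rough sketch of migrating to the new classes; `rgb`, `maps`,
    `faces_uvs` and `verts_uvs` are hypothetical placeholder tensors):
        tex = Textures(verts_rgb=rgb)              # old
        tex = TexturesVertex(verts_features=rgb)   # new equivalent
        tex = Textures(maps=maps, faces_uvs=faces_uvs, verts_uvs=verts_uvs)    # old
        tex = TexturesUV(maps=maps, faces_uvs=faces_uvs, verts_uvs=verts_uvs)  # new equivalent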
"""
warnings.warn(
"""Textures class is deprecated,
use TexturesUV, TexturesAtlas, TexturesVertex instead.
Textures class will be removed in future releases.""",
PendingDeprecationWarning,
)
if all(x is not None for x in [faces_uvs, verts_uvs, maps]):
# pyre-fixme[6]: Expected `Union[List[torch.Tensor], torch.Tensor]` for 1st
# param but got `Union[None, List[typing.Any], torch.Tensor]`.
return TexturesUV(maps=maps, faces_uvs=faces_uvs, verts_uvs=verts_uvs)
elif verts_rgb is not None:
return TexturesVertex(verts_features=verts_rgb)
else:
raise ValueError(
"Textures either requires all three of (faces uvs, verts uvs, maps) or verts rgb"
)
class TexturesAtlas(TexturesBase):
def __init__(self, atlas: Union[torch.Tensor, List, None]):
"""
A texture representation where each face has a square texture map.
This is based on the implementation from SoftRasterizer [1].
Args:
atlas: (N, F, R, R, D) tensor giving the per face texture map.
The atlas can be created during obj loading with the
pytorch3d.io.load_obj function - in the input arguments
set `create_texture_atlas=True`. The atlas will be
returned in aux.texture_atlas.
The padded and list representations of the textures are stored
        and the packed representation is computed on the fly and
        not cached.
[1] Liu et al, 'Soft Rasterizer: A Differentiable Renderer for Image-based
3D Reasoning', ICCV 2019
See also https://github.com/ShichenLiu/SoftRas/issues/21
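
        Example (a minimal sketch, assuming an obj file with an associated
        material and texture; "model.obj" is a placeholder path):
            from pytorch3d.io import load_obj
            verts, faces, aux = load_obj(
                "model.obj", load_textures=True, create_texture_atlas=True
            )
            tex = TexturesAtlas(atlas=[aux.texture_atlas])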
"""
if isinstance(atlas, (list, tuple)):
correct_format = all(
(
torch.is_tensor(elem)
and elem.ndim == 4
and elem.shape[1] == elem.shape[2]
and elem.shape[1] == atlas[0].shape[1]
)
for elem in atlas
)
if not correct_format:
msg = (
"Expected atlas to be a list of tensors of shape (F, R, R, D) "
"with the same value of R."
)
raise ValueError(msg)
self._atlas_list = atlas
self._atlas_padded = None
self.device = torch.device("cpu")
# These values may be overridden when textures is
# passed into the Meshes constructor. For more details
# refer to the __init__ of Meshes.
self._N = len(atlas)
self._num_faces_per_mesh = [len(a) for a in atlas]
if self._N > 0:
self.device = atlas[0].device
elif torch.is_tensor(atlas):
# pyre-fixme[16]: `Optional` has no attribute `ndim`.
if atlas.ndim != 5:
msg = "Expected atlas to be of shape (N, F, R, R, D); got %r"
raise ValueError(msg % repr(atlas.ndim))
self._atlas_padded = atlas
self._atlas_list = None
# pyre-fixme[16]: `Optional` has no attribute `device`.
self.device = atlas.device
# These values may be overridden when textures is
# passed into the Meshes constructor. For more details
# refer to the __init__ of Meshes.
# pyre-fixme[6]: Expected `Sized` for 1st param but got
# `Optional[torch.Tensor]`.
self._N = len(atlas)
# pyre-fixme[16]: `Optional` has no attribute `shape`.
max_F = atlas.shape[1]
self._num_faces_per_mesh = [max_F] * self._N
else:
raise ValueError("Expected atlas to be a tensor or list")
# The num_faces_per_mesh, N and valid
# are reset inside the Meshes object when textures is
# passed into the Meshes constructor. For more details
# refer to the __init__ of Meshes.
self.valid = torch.ones((self._N,), dtype=torch.bool, device=self.device)
def clone(self):
tex = self.__class__(atlas=self.atlas_padded().clone())
if self._atlas_list is not None:
tex._atlas_list = [atlas.clone() for atlas in self._atlas_list]
num_faces = (
self._num_faces_per_mesh.clone()
if torch.is_tensor(self._num_faces_per_mesh)
else self._num_faces_per_mesh
)
tex.valid = self.valid.clone()
tex._num_faces_per_mesh = num_faces
return tex
def detach(self):
tex = self.__class__(atlas=self.atlas_padded().detach())
if self._atlas_list is not None:
tex._atlas_list = [atlas.detach() for atlas in self._atlas_list]
num_faces = (
self._num_faces_per_mesh.detach()
if torch.is_tensor(self._num_faces_per_mesh)
else self._num_faces_per_mesh
)
tex.valid = self.valid.detach()
tex._num_faces_per_mesh = num_faces
return tex
def __getitem__(self, index):
props = ["atlas_list", "_num_faces_per_mesh"]
new_props = self._getitem(index, props=props)
atlas = new_props["atlas_list"]
if isinstance(atlas, list):
# multiple batch elements
new_tex = self.__class__(atlas=atlas)
elif torch.is_tensor(atlas):
# single element
new_tex = self.__class__(atlas=[atlas])
else:
raise ValueError("Not all values are provided in the correct format")
new_tex._num_faces_per_mesh = new_props["_num_faces_per_mesh"]
return new_tex
def atlas_padded(self) -> torch.Tensor:
if self._atlas_padded is None:
if self.isempty():
self._atlas_padded = torch.zeros(
(self._N, 0, 0, 0, 3), dtype=torch.float32, device=self.device
)
else:
self._atlas_padded = _list_to_padded_wrapper(
self._atlas_list, pad_value=0.0
)
return self._atlas_padded
def atlas_list(self) -> List[torch.Tensor]:
if self._atlas_list is None:
if self.isempty():
                self._atlas_list = [
                    torch.empty((0, 0, 0, 3), dtype=torch.float32, device=self.device)
                ] * self._N
            else:
                self._atlas_list = _padded_to_list_wrapper(
                    self._atlas_padded, split_size=self._num_faces_per_mesh
                )
return self._atlas_list
def atlas_packed(self) -> torch.Tensor:
if self.isempty():
return torch.zeros(
(self._N, 0, 0, 3), dtype=torch.float32, device=self.device
)
atlas_list = self.atlas_list()
return list_to_packed(atlas_list)[0]
def extend(self, N: int) -> "TexturesAtlas":
new_props = self._extend(N, ["atlas_padded", "_num_faces_per_mesh"])
new_tex = TexturesAtlas(atlas=new_props["atlas_padded"])
new_tex._num_faces_per_mesh = new_props["_num_faces_per_mesh"]
return new_tex
def sample_textures(self, fragments, **kwargs) -> torch.Tensor:
"""
Args:
fragments:
The outputs of rasterization. From this we use
- pix_to_face: LongTensor of shape (N, H, W, K) specifying the indices
of the faces (in the packed representation) which
overlap each pixel in the image.
- barycentric_coords: FloatTensor of shape (N, H, W, K, 3) specifying
                  the barycentric coordinates of each pixel
relative to the faces (in the packed
representation) which overlap the pixel.
Returns:
texels: (N, H, W, K, 3)
"""
N, H, W, K = fragments.pix_to_face.shape
atlas_packed = self.atlas_packed()
R = atlas_packed.shape[1]
bary = fragments.bary_coords
pix_to_face = fragments.pix_to_face
bary_w01 = bary[..., :2]
# pyre-fixme[16]: `bool` has no attribute `__getitem__`.
mask = (pix_to_face < 0)[..., None]
bary_w01 = torch.where(mask, torch.zeros_like(bary_w01), bary_w01)
w_xy = (bary_w01 * R).to(torch.int64) # (N, H, W, K, 2)
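        # A note on the indexing below (following the SoftRasterizer atlas
        # layout referenced in the class docstring): each face's atlas is an
        # R x R grid addressed by the first two barycentric coordinates.
        # `below_diag` checks whether the fractional parts of w0 * R and
        # w1 * R sum to at most 1, i.e. whether the sample lies below the
        # diagonal of its grid cell. Samples above the diagonal are mapped to
        # the mirrored cell (R - 1 - w_x, R - 1 - w_y), which is otherwise
        # unused because w0 + w1 <= 1 inside a face.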
below_diag = (
bary_w01.sum(dim=-1) * R - w_xy.float().sum(dim=-1)
) <= 1.0 # (N, H, W, K)
w_x, w_y = w_xy.unbind(-1)
w_x = torch.where(below_diag, w_x, (R - 1 - w_x))
w_y = torch.where(below_diag, w_y, (R - 1 - w_y))
texels = atlas_packed[pix_to_face, w_y, w_x]
texels = texels * (pix_to_face >= 0)[..., None].float()
return texels
def join_batch(self, textures: List["TexturesAtlas"]) -> "TexturesAtlas":
"""
Join the list of textures given by `textures` to
self to create a batch of textures. Return a new
TexturesAtlas object with the combined textures.
Args:
            textures: List of TexturesAtlas objects
        Returns:
            new_tex: TexturesAtlas object with the combined
                textures from self and the list `textures`.
"""
tex_types_same = all(isinstance(tex, TexturesAtlas) for tex in textures)
if not tex_types_same:
raise ValueError("All textures must be of type TexturesAtlas.")
atlas_list = []
atlas_list += self.atlas_list()
num_faces_per_mesh = self._num_faces_per_mesh
for tex in textures:
atlas_list += tex.atlas_list()
num_faces_per_mesh += tex._num_faces_per_mesh
new_tex = self.__class__(atlas=atlas_list)
new_tex._num_faces_per_mesh = num_faces_per_mesh
return new_tex
def join_scene(self) -> "TexturesAtlas":
"""
Return a new TexturesAtlas amalgamating the batch.
"""
return self.__class__(atlas=[torch.cat(self.atlas_list())])
class TexturesUV(TexturesBase):
def __init__(
self,
maps: Union[torch.Tensor, List[torch.Tensor]],
faces_uvs: Union[torch.Tensor, List[torch.Tensor], Tuple[torch.Tensor]],
verts_uvs: Union[torch.Tensor, List[torch.Tensor], Tuple[torch.Tensor]],
padding_mode: str = "border",
align_corners: bool = True,
):
"""
Textures are represented as a per mesh texture map and uv coordinates for each
vertex in each face. NOTE: this class only supports one texture map per mesh.
Args:
maps: texture map per mesh. This can either be a list of maps
[(H, W, 3)] or a padded tensor of shape (N, H, W, 3)
faces_uvs: (N, F, 3) LongTensor giving the index into verts_uvs
for each face
verts_uvs: (N, V, 2) tensor giving the uv coordinates per vertex
(a FloatTensor with values between 0 and 1).
align_corners: If true, the extreme values 0 and 1 for verts_uvs
indicate the centers of the edge pixels in the maps.
padding_mode: padding mode for outside grid values
("zeros", "border" or "reflection").
The align_corners and padding_mode arguments correspond to the arguments
of the `grid_sample` torch function. There is an informative illustration of
the two align_corners options at
https://discuss.pytorch.org/t/22663/9 .
An example of how the indexing into the maps, with align_corners=True,
works is as follows.
If maps[i] has shape [1001, 101] and the value of verts_uvs[i][j]
is [0.4, 0.3], then a value of j in faces_uvs[i] means a vertex
whose color is given by maps[i][700, 40]. padding_mode affects what
happens if a value in verts_uvs is less than 0 or greater than 1.
Note that increasing a value in verts_uvs[..., 0] increases an index
in maps, whereas increasing a value in verts_uvs[..., 1] _decreases_
an _earlier_ index in maps.
If align_corners=False, an example would be as follows.
If maps[i] has shape [1000, 100] and the value of verts_uvs[i][j]
is [0.405, 0.2995], then a value of j in faces_uvs[i] means a vertex
whose color is given by maps[i][700, 40].
When align_corners=False, padding_mode even matters for values in
verts_uvs slightly above 0 or slightly below 1. In this case, the
padding_mode matters if the first value is outside the interval
[0.0005, 0.9995] or if the second is outside the interval
[0.005, 0.995].
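
        Example (a rough construction sketch, assuming an obj file with uv
        coordinates and a single texture image; "model.obj" is a placeholder
        path):
            from pytorch3d.io import load_obj
            verts, faces, aux = load_obj("model.obj", load_textures=True)
            texture_image = list(aux.texture_images.values())[0]  # (H, W, 3)
            tex = TexturesUV(
                maps=[texture_image],
                faces_uvs=[faces.textures_idx],
                verts_uvs=[aux.verts_uvs],
            )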
"""
super().__init__()
self.padding_mode = padding_mode
self.align_corners = align_corners
if isinstance(faces_uvs, (list, tuple)):
for fv in faces_uvs:
if fv.ndim != 2 or fv.shape[-1] != 3:
msg = "Expected faces_uvs to be of shape (F, 3); got %r"
raise ValueError(msg % repr(fv.shape))
self._faces_uvs_list = faces_uvs
self._faces_uvs_padded = None
self.device = torch.device("cpu")
# These values may be overridden when textures is
# passed into the Meshes constructor. For more details
# refer to the __init__ of Meshes.
self._N = len(faces_uvs)
self._num_faces_per_mesh = [len(fv) for fv in faces_uvs]
if self._N > 0:
self.device = faces_uvs[0].device
elif torch.is_tensor(faces_uvs):
if faces_uvs.ndim != 3 or faces_uvs.shape[-1] != 3:
msg = "Expected faces_uvs to be of shape (N, F, 3); got %r"
raise ValueError(msg % repr(faces_uvs.shape))
self._faces_uvs_padded = faces_uvs
self._faces_uvs_list = None
self.device = faces_uvs.device
# These values may be overridden when textures is
# passed into the Meshes constructor. For more details
# refer to the __init__ of Meshes.
self._N = len(faces_uvs)
max_F = faces_uvs.shape[1]
self._num_faces_per_mesh = [max_F] * self._N
else:
raise ValueError("Expected faces_uvs to be a tensor or list")
if isinstance(verts_uvs, (list, tuple)):
for fv in verts_uvs:
if fv.ndim != 2 or fv.shape[-1] != 2:
msg = "Expected verts_uvs to be of shape (V, 2); got %r"
raise ValueError(msg % repr(fv.shape))
self._verts_uvs_list = verts_uvs
self._verts_uvs_padded = None
if len(verts_uvs) != self._N:
raise ValueError(
"verts_uvs and faces_uvs must have the same batch dimension"
)
if not all(v.device == self.device for v in verts_uvs):
raise ValueError("verts_uvs and faces_uvs must be on the same device")
elif torch.is_tensor(verts_uvs):
if (
verts_uvs.ndim != 3
or verts_uvs.shape[-1] != 2
or verts_uvs.shape[0] != self._N
):
msg = "Expected verts_uvs to be of shape (N, V, 2); got %r"
raise ValueError(msg % repr(verts_uvs.shape))
self._verts_uvs_padded = verts_uvs
self._verts_uvs_list = None
if verts_uvs.device != self.device:
raise ValueError("verts_uvs and faces_uvs must be on the same device")
else:
raise ValueError("Expected verts_uvs to be a tensor or list")
if torch.is_tensor(maps):
# pyre-fixme[16]: `List` has no attribute `ndim`.
# pyre-fixme[16]: `List` has no attribute `shape`.
if maps.ndim != 4 or maps.shape[0] != self._N:
msg = "Expected maps to be of shape (N, H, W, 3); got %r"
raise ValueError(msg % repr(maps.shape))
self._maps_padded = maps
self._maps_list = None
elif isinstance(maps, (list, tuple)):
if len(maps) != self._N:
raise ValueError("Expected one texture map per mesh in the batch.")
self._maps_list = maps
if self._N > 0:
maps = _pad_texture_maps(maps, align_corners=self.align_corners)
else:
maps = torch.empty(
(self._N, 0, 0, 3), dtype=torch.float32, device=self.device
)
self._maps_padded = maps
else:
raise ValueError("Expected maps to be a tensor or list.")
if self._maps_padded.device != self.device:
raise ValueError("maps must be on the same device as verts/faces uvs.")
self.valid = torch.ones((self._N,), dtype=torch.bool, device=self.device)
def clone(self):
tex = self.__class__(
self.maps_padded().clone(),
self.faces_uvs_padded().clone(),
self.verts_uvs_padded().clone(),
)
if self._maps_list is not None:
tex._maps_list = [m.clone() for m in self._maps_list]
if self._verts_uvs_list is not None:
tex._verts_uvs_list = [v.clone() for v in self._verts_uvs_list]
if self._faces_uvs_list is not None:
tex._faces_uvs_list = [f.clone() for f in self._faces_uvs_list]
num_faces = (
self._num_faces_per_mesh.clone()
if torch.is_tensor(self._num_faces_per_mesh)
else self._num_faces_per_mesh
)
tex._num_faces_per_mesh = num_faces
tex.valid = self.valid.clone()
return tex
def detach(self):
tex = self.__class__(
self.maps_padded().detach(),
self.faces_uvs_padded().detach(),
self.verts_uvs_padded().detach(),
)
if self._maps_list is not None:
tex._maps_list = [m.detach() for m in self._maps_list]
if self._verts_uvs_list is not None:
tex._verts_uvs_list = [v.detach() for v in self._verts_uvs_list]
if self._faces_uvs_list is not None:
tex._faces_uvs_list = [f.detach() for f in self._faces_uvs_list]
num_faces = (
self._num_faces_per_mesh.detach()
if torch.is_tensor(self._num_faces_per_mesh)
else self._num_faces_per_mesh
)
tex._num_faces_per_mesh = num_faces
tex.valid = self.valid.detach()
return tex
def __getitem__(self, index):
props = ["verts_uvs_list", "faces_uvs_list", "maps_list", "_num_faces_per_mesh"]
new_props = self._getitem(index, props)
faces_uvs = new_props["faces_uvs_list"]
verts_uvs = new_props["verts_uvs_list"]
maps = new_props["maps_list"]
# if index has multiple values then faces/verts/maps may be a list of tensors
if all(isinstance(f, (list, tuple)) for f in [faces_uvs, verts_uvs, maps]):
new_tex = self.__class__(
faces_uvs=faces_uvs,
verts_uvs=verts_uvs,
maps=maps,
padding_mode=self.padding_mode,
align_corners=self.align_corners,
)
elif all(torch.is_tensor(f) for f in [faces_uvs, verts_uvs, maps]):
new_tex = self.__class__(
faces_uvs=[faces_uvs],
verts_uvs=[verts_uvs],
maps=[maps],
padding_mode=self.padding_mode,
align_corners=self.align_corners,
)
else:
raise ValueError("Not all values are provided in the correct format")
new_tex._num_faces_per_mesh = new_props["_num_faces_per_mesh"]
return new_tex
def faces_uvs_padded(self) -> torch.Tensor:
if self._faces_uvs_padded is None:
if self.isempty():
                self._faces_uvs_padded = torch.zeros(
                    (self._N, 0, 3), dtype=torch.int64, device=self.device
                )
else:
self._faces_uvs_padded = list_to_padded(
self._faces_uvs_list, pad_value=0.0
)
return self._faces_uvs_padded
def faces_uvs_list(self) -> List[torch.Tensor]:
if self._faces_uvs_list is None:
if self.isempty():
self._faces_uvs_list = [
                    torch.empty((0, 3), dtype=torch.int64, device=self.device)
] * self._N
else:
self._faces_uvs_list = padded_to_list(
self._faces_uvs_padded, split_size=self._num_faces_per_mesh
)
return self._faces_uvs_list
def verts_uvs_padded(self) -> torch.Tensor:
if self._verts_uvs_padded is None:
if self.isempty():
self._verts_uvs_padded = torch.zeros(
(self._N, 0, 2), dtype=torch.float32, device=self.device
)
else:
self._verts_uvs_padded = list_to_padded(
self._verts_uvs_list, pad_value=0.0
)
return self._verts_uvs_padded
def verts_uvs_list(self) -> List[torch.Tensor]:
if self._verts_uvs_list is None:
if self.isempty():
self._verts_uvs_list = [
torch.empty((0, 2), dtype=torch.float32, device=self.device)
] * self._N
else:
# The number of vertices in the mesh and in verts_uvs can differ
# e.g. if a vertex is shared between 3 faces, it can
# have up to 3 different uv coordinates. Therefore we cannot
# convert directly from padded to list using _num_verts_per_mesh
self._verts_uvs_list = list(self._verts_uvs_padded.unbind(0))
return self._verts_uvs_list
# Currently only the padded maps are used.
def maps_padded(self) -> torch.Tensor:
return self._maps_padded
def maps_list(self) -> List[torch.Tensor]:
if self._maps_list is not None:
return self._maps_list
return self._maps_padded.unbind(0)
def extend(self, N: int) -> "TexturesUV":
new_props = self._extend(
N,
[
"maps_padded",
"verts_uvs_padded",
"faces_uvs_padded",
"_num_faces_per_mesh",
],
)
new_tex = TexturesUV(
maps=new_props["maps_padded"],
faces_uvs=new_props["faces_uvs_padded"],
verts_uvs=new_props["verts_uvs_padded"],
padding_mode=self.padding_mode,
align_corners=self.align_corners,
)
new_tex._num_faces_per_mesh = new_props["_num_faces_per_mesh"]
return new_tex
def sample_textures(self, fragments, **kwargs) -> torch.Tensor:
"""
Interpolate a 2D texture map using uv vertex texture coordinates for each
face in the mesh. First interpolate the vertex uvs using barycentric coordinates
for each pixel in the rasterized output. Then interpolate the texture map
using the uv coordinate for each pixel.
Args:
fragments:
The outputs of rasterization. From this we use
- pix_to_face: LongTensor of shape (N, H, W, K) specifying the indices
of the faces (in the packed representation) which
overlap each pixel in the image.
- barycentric_coords: FloatTensor of shape (N, H, W, K, 3) specifying
                  the barycentric coordinates of each pixel
relative to the faces (in the packed
representation) which overlap the pixel.
Returns:
texels: tensor of shape (N, H, W, K, C) giving the interpolated
texture for each pixel in the rasterized image.
"""
if self.isempty():
faces_verts_uvs = torch.zeros(
(self._N, 3, 2), dtype=torch.float32, device=self.device
)
else:
packing_list = [
i[j] for i, j in zip(self.verts_uvs_list(), self.faces_uvs_list())
]
faces_verts_uvs = torch.cat(packing_list)
texture_maps = self.maps_padded()
# pixel_uvs: (N, H, W, K, 2)
pixel_uvs = interpolate_face_attributes(
fragments.pix_to_face, fragments.bary_coords, faces_verts_uvs
)
N, H_out, W_out, K = fragments.pix_to_face.shape
N, H_in, W_in, C = texture_maps.shape # 3 for RGB
# pixel_uvs: (N, H, W, K, 2) -> (N, K, H, W, 2) -> (NK, H, W, 2)
pixel_uvs = pixel_uvs.permute(0, 3, 1, 2, 4).reshape(N * K, H_out, W_out, 2)
# textures.map:
# (N, H, W, C) -> (N, C, H, W) -> (1, N, C, H, W)
# -> expand (K, N, C, H, W) -> reshape (N*K, C, H, W)
texture_maps = (
texture_maps.permute(0, 3, 1, 2)[None, ...]
.expand(K, -1, -1, -1, -1)
.transpose(0, 1)
.reshape(N * K, C, H_in, W_in)
)
# Textures: (N*K, C, H, W), pixel_uvs: (N*K, H, W, 2)
# Now need to format the pixel uvs and the texture map correctly!
# From pytorch docs, grid_sample takes `grid` and `input`:
# grid specifies the sampling pixel locations normalized by
        # the input spatial dimensions. It should have most
        # values in the range of [-1, 1]. Values x = -1, y = -1
        # correspond to the left-top pixel of input, and values x = 1, y = 1
        # correspond to the right-bottom pixel of input.
pixel_uvs = pixel_uvs * 2.0 - 1.0
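        # The class docstring notes that increasing verts_uvs[..., 1] should
        # decrease the (earlier) row index into the map, i.e. the uv origin is
        # at the bottom of the image, whereas grid_sample treats row 0 as the
        # top of the input; hence the vertical flip of the texture maps below.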
texture_maps = torch.flip(texture_maps, [2]) # flip y axis of the texture map
if texture_maps.device != pixel_uvs.device:
texture_maps = texture_maps.to(pixel_uvs.device)
texels = F.grid_sample(
texture_maps,
pixel_uvs,
align_corners=self.align_corners,
padding_mode=self.padding_mode,
)
# texels now has shape (NK, C, H_out, W_out)
texels = texels.reshape(N, K, C, H_out, W_out).permute(0, 3, 4, 1, 2)
return texels
def join_batch(self, textures: List["TexturesUV"]) -> "TexturesUV":
"""
Join the list of textures given by `textures` to
self to create a batch of textures. Return a new
TexturesUV object with the combined textures.
Args:
textures: List of TexturesUV objects
Returns:
new_tex: TexturesUV object with the combined
textures from self and the list `textures`.
"""
tex_types_same = all(isinstance(tex, TexturesUV) for tex in textures)
if not tex_types_same:
raise ValueError("All textures must be of type TexturesUV.")
padding_modes_same = all(
tex.padding_mode == self.padding_mode for tex in textures
)
if not padding_modes_same:
raise ValueError("All textures must have the same padding_mode.")
align_corners_same = all(
tex.align_corners == self.align_corners for tex in textures
)
if not align_corners_same:
raise ValueError("All textures must have the same align_corners value.")
verts_uvs_list = []
faces_uvs_list = []
maps_list = []
faces_uvs_list += self.faces_uvs_list()
verts_uvs_list += self.verts_uvs_list()
maps_list += list(self.maps_padded().unbind(0))
num_faces_per_mesh = self._num_faces_per_mesh
for tex in textures:
verts_uvs_list += tex.verts_uvs_list()
faces_uvs_list += tex.faces_uvs_list()
num_faces_per_mesh += tex._num_faces_per_mesh
tex_map_list = list(tex.maps_padded().unbind(0))
maps_list += tex_map_list
new_tex = self.__class__(
maps=maps_list,
verts_uvs=verts_uvs_list,
faces_uvs=faces_uvs_list,
padding_mode=self.padding_mode,
align_corners=self.align_corners,
)
new_tex._num_faces_per_mesh = num_faces_per_mesh
return new_tex
def _place_map_into_single_map(
self,
single_map: torch.Tensor,
map_: torch.Tensor,
location: Tuple[int, int, bool], # (x,y) and whether flipped
) -> None:
"""
Copy map into a larger tensor single_map at the destination specified by location.
If align_corners is False, we add the needed border around the destination.
Used by join_scene.
Args:
single_map: (total_H, total_W, 3)
map_: (H, W, 3) source data
location: where to place map
"""
do_flip = location[2]
source = map_.transpose(0, 1) if do_flip else map_
border_width = 0 if self.align_corners else 1
lower_u = location[0] + border_width
lower_v = location[1] + border_width
upper_u = lower_u + source.shape[0]
upper_v = lower_v + source.shape[1]
single_map[lower_u:upper_u, lower_v:upper_v] = source
if self.padding_mode != "zeros" and not self.align_corners:
single_map[lower_u - 1, lower_v:upper_v] = single_map[
lower_u, lower_v:upper_v
]
single_map[upper_u, lower_v:upper_v] = single_map[
upper_u - 1, lower_v:upper_v
]
single_map[lower_u:upper_u, lower_v - 1] = single_map[
lower_u:upper_u, lower_v
]
single_map[lower_u:upper_u, upper_v] = single_map[
lower_u:upper_u, upper_v - 1
]
single_map[lower_u - 1, lower_v - 1] = single_map[lower_u, lower_v]
single_map[lower_u - 1, upper_v] = single_map[lower_u, upper_v - 1]
single_map[upper_u, lower_v - 1] = single_map[upper_u - 1, lower_v]
single_map[upper_u, upper_v] = single_map[upper_u - 1, upper_v - 1]
def join_scene(self) -> "TexturesUV":
"""
Return a new TexturesUV amalgamating the batch.
        We compute a single large map which contains all the original maps,
        and adjust verts_uvs to point into it. This will not replicate the
        behavior of padding for verts_uvs values outside [0, 1].
If align_corners=False, we need to add an artificial border around
every map.
We use the function `pack_rectangles` to provide a layout for the
single map. _place_map_into_single_map is used to copy the maps
        into the single map. The merging of verts_uvs and faces_uvs is
        handled locally in this function.
"""
maps = self.maps_list()
heights_and_widths = []
extra_border = 0 if self.align_corners else 2
for map_ in maps:
heights_and_widths.append(
(map_.shape[0] + extra_border, map_.shape[1] + extra_border)
)
merging_plan = pack_rectangles(heights_and_widths)
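        # merging_plan comes from pack_rectangles (see .utils); it is expected
        # to provide total_size, the (H, W) of the combined map, and locations,
        # one (x, y, flipped) tuple per input map in the same order as `maps`.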
# pyre-fixme[16]: `Tensor` has no attribute `new_zeros`.
single_map = maps[0].new_zeros((*merging_plan.total_size, 3))
verts_uvs = self.verts_uvs_list()
verts_uvs_merged = []
for map_, loc, uvs in zip(maps, merging_plan.locations, verts_uvs):
new_uvs = uvs.clone()
self._place_map_into_single_map(single_map, map_, loc)
do_flip = loc[2]
x_shape = map_.shape[1] if do_flip else map_.shape[0]
y_shape = map_.shape[0] if do_flip else map_.shape[1]
if do_flip:
# Here we have flipped / transposed the map.
# In uvs, the y values are decreasing from 1 to 0 and the x
# values increase from 0 to 1. We subtract all values from 1
# as the x's become y's and the y's become x's.
new_uvs = 1.0 - new_uvs[:, [1, 0]]
if TYPE_CHECKING:
new_uvs = torch.Tensor(new_uvs)
# If align_corners is True, then an index of x (where x is in
# the range 0 .. map_.shape[]-1) in one of the input maps
# was hit by a u of x/(map_.shape[]-1).
# That x is located at the index loc[] + x in the single_map, and
# to hit that we need u to equal (loc[] + x) / (total_size[]-1)
# so the old u should be mapped to
# { u*(map_.shape[]-1) + loc[] } / (total_size[]-1)
# If align_corners is False, then an index of x (where x is in
# the range 1 .. map_.shape[]-2) in one of the input maps
# was hit by a u of (x+0.5)/(map_.shape[]).
# That x is located at the index loc[] + 1 + x in the single_map,
# (where the 1 is for the border)
# and to hit that we need u to equal (loc[] + 1 + x + 0.5) / (total_size[])
# so the old u should be mapped to
# { loc[] + 1 + u*map_.shape[]-0.5 + 0.5 } / (total_size[])
# = { loc[] + 1 + u*map_.shape[] } / (total_size[])
# We change the y's in new_uvs for the scaling of height,
# and the x's for the scaling of width.
# That is why the 1's and 0's are mismatched in these lines.
one_if_align = 1 if self.align_corners else 0
one_if_not_align = 1 - one_if_align
denom_x = merging_plan.total_size[0] - one_if_align
scale_x = x_shape - one_if_align
denom_y = merging_plan.total_size[1] - one_if_align
scale_y = y_shape - one_if_align
new_uvs[:, 1] *= scale_x / denom_x
new_uvs[:, 1] += (loc[0] + one_if_not_align) / denom_x
new_uvs[:, 0] *= scale_y / denom_y
new_uvs[:, 0] += (loc[1] + one_if_not_align) / denom_y
verts_uvs_merged.append(new_uvs)
faces_uvs_merged = []
offset = 0
for faces_uvs_, verts_uvs_ in zip(self.faces_uvs_list(), verts_uvs):
faces_uvs_merged.append(offset + faces_uvs_)
offset += verts_uvs_.shape[0]
return self.__class__(
maps=[single_map],
verts_uvs=[torch.cat(verts_uvs_merged)],
faces_uvs=[torch.cat(faces_uvs_merged)],
align_corners=self.align_corners,
padding_mode=self.padding_mode,
)
class TexturesVertex(TexturesBase):
def __init__(
self,
verts_features: Union[torch.Tensor, List[torch.Tensor], Tuple[torch.Tensor]],
):
"""
Batched texture representation where each vertex in a mesh
has a D dimensional feature vector.
Args:
            verts_features: list of (Vi, D) tensors or a padded (N, V, D) tensor
                giving a feature vector of arbitrary dimension D for each vertex.
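
        Example (a minimal sketch; `verts` is a hypothetical (V, 3) tensor of
        vertex positions for a single mesh):
            rgb = torch.ones_like(verts)  # one white RGB value per vertex
            tex = TexturesVertex(verts_features=[rgb])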
"""
if isinstance(verts_features, (tuple, list)):
correct_shape = all(
(torch.is_tensor(v) and v.ndim == 2) for v in verts_features
)
if not correct_shape:
raise ValueError(
"Expected verts_features to be a list of tensors of shape (V, D)."
)
self._verts_features_list = verts_features
self._verts_features_padded = None
self.device = torch.device("cpu")
# These values may be overridden when textures is
# passed into the Meshes constructor. For more details
# refer to the __init__ of Meshes.
self._N = len(verts_features)
self._num_verts_per_mesh = [len(fv) for fv in verts_features]
if self._N > 0:
self.device = verts_features[0].device
elif torch.is_tensor(verts_features):
if verts_features.ndim != 3:
msg = "Expected verts_features to be of shape (N, V, D); got %r"
raise ValueError(msg % repr(verts_features.shape))
self._verts_features_padded = verts_features
self._verts_features_list = None
self.device = verts_features.device
# These values may be overridden when textures is
# passed into the Meshes constructor. For more details
# refer to the __init__ of Meshes.
self._N = len(verts_features)
            max_V = verts_features.shape[1]
            self._num_verts_per_mesh = [max_V] * self._N
else:
raise ValueError("verts_features must be a tensor or list of tensors")
# This is set inside the Meshes object when textures is
# passed into the Meshes constructor. For more details
# refer to the __init__ of Meshes.
self.valid = torch.ones((self._N,), dtype=torch.bool, device=self.device)
def clone(self):
tex = self.__class__(self.verts_features_padded().clone())
if self._verts_features_list is not None:
tex._verts_features_list = [f.clone() for f in self._verts_features_list]
num_verts = (
self._num_verts_per_mesh.clone()
if torch.is_tensor(self._num_verts_per_mesh)
else self._num_verts_per_mesh
)
tex._num_verts_per_mesh = num_verts
tex.valid = self.valid.clone()
return tex
def detach(self):
tex = self.__class__(self.verts_features_padded().detach())
if self._verts_features_list is not None:
tex._verts_features_list = [f.detach() for f in self._verts_features_list]
num_verts = (
self._num_verts_per_mesh.detach()
if torch.is_tensor(self._num_verts_per_mesh)
else self._num_verts_per_mesh
)
tex._num_verts_per_mesh = num_verts
tex.valid = self.valid.detach()
return tex
def __getitem__(self, index):
props = ["verts_features_list", "_num_verts_per_mesh"]
new_props = self._getitem(index, props)
verts_features = new_props["verts_features_list"]
if isinstance(verts_features, list):
new_tex = self.__class__(verts_features=verts_features)
elif torch.is_tensor(verts_features):
new_tex = self.__class__(verts_features=[verts_features])
else:
raise ValueError("Not all values are provided in the correct format")
new_tex._num_verts_per_mesh = new_props["_num_verts_per_mesh"]
return new_tex
def verts_features_padded(self) -> torch.Tensor:
if self._verts_features_padded is None:
if self.isempty():
                self._verts_features_padded = torch.zeros(
                    (self._N, 0, 3), dtype=torch.float32, device=self.device
                )
else:
self._verts_features_padded = list_to_padded(
self._verts_features_list, pad_value=0.0
)
return self._verts_features_padded
def verts_features_list(self) -> List[torch.Tensor]:
if self._verts_features_list is None:
if self.isempty():
self._verts_features_list = [
torch.empty((0, 3), dtype=torch.float32, device=self.device)
] * self._N
else:
self._verts_features_list = padded_to_list(
self._verts_features_padded, split_size=self._num_verts_per_mesh
)
return self._verts_features_list
def verts_features_packed(self) -> torch.Tensor:
if self.isempty():
return torch.zeros((self._N, 3, 0), dtype=torch.float32, device=self.device)
verts_features_list = self.verts_features_list()
return list_to_packed(verts_features_list)[0]
def extend(self, N: int) -> "TexturesVertex":
new_props = self._extend(N, ["verts_features_padded", "_num_verts_per_mesh"])
new_tex = TexturesVertex(verts_features=new_props["verts_features_padded"])
new_tex._num_verts_per_mesh = new_props["_num_verts_per_mesh"]
return new_tex
def sample_textures(self, fragments, faces_packed=None) -> torch.Tensor:
"""
        Determine the color for each rasterized face. Interpolate the colors of
        the vertices which form each face using the barycentric coordinates.
Args:
fragments:
The outputs of rasterization. From this we use
- pix_to_face: LongTensor of shape (N, H, W, K) specifying the indices
of the faces (in the packed representation) which
overlap each pixel in the image.
- barycentric_coords: FloatTensor of shape (N, H, W, K, 3) specifying
                  the barycentric coordinates of each pixel
relative to the faces (in the packed
representation) which overlap the pixel.
Returns:
            texels: A texture value per pixel of shape (N, H, W, K, C).
There will be one C dimensional value for each element in
fragments.pix_to_face.
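
        Note: faces_packed is expected to be the packed faces tensor of the
        corresponding Meshes object (e.g. meshes.faces_packed()), so that it
        indexes into verts_features_packed consistently with
        fragments.pix_to_face.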
"""
verts_features_packed = self.verts_features_packed()
faces_verts_features = verts_features_packed[faces_packed]
texels = interpolate_face_attributes(
fragments.pix_to_face, fragments.bary_coords, faces_verts_features
)
return texels
def join_batch(self, textures: List["TexturesVertex"]) -> "TexturesVertex":
"""
Join the list of textures given by `textures` to
self to create a batch of textures. Return a new
TexturesVertex object with the combined textures.
Args:
textures: List of TexturesVertex objects
Returns:
new_tex: TexturesVertex object with the combined
textures from self and the list `textures`.
"""
tex_types_same = all(isinstance(tex, TexturesVertex) for tex in textures)
if not tex_types_same:
raise ValueError("All textures must be of type TexturesVertex.")
verts_features_list = []
verts_features_list += self.verts_features_list()
        num_verts_per_mesh = self._num_verts_per_mesh
        for tex in textures:
            verts_features_list += tex.verts_features_list()
            num_verts_per_mesh += tex._num_verts_per_mesh
        new_tex = self.__class__(verts_features=verts_features_list)
        new_tex._num_verts_per_mesh = num_verts_per_mesh
return new_tex
def join_scene(self) -> "TexturesVertex":
"""
Return a new TexturesVertex amalgamating the batch.
"""
return self.__class__(verts_features=[torch.cat(self.verts_features_list())])