Mirror of https://github.com/facebookresearch/pytorch3d.git (synced 2026-02-06 22:12:16 +08:00)
Update latest version of site
@@ -81,12 +81,22 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install torch torchvision\n",
|
||||
"import os\n",
|
||||
"import sys\n",
|
||||
"import torch\n",
|
||||
"if torch.__version__=='1.6.0+cu101' and sys.platform.startswith('linux'):\n",
|
||||
" !pip install pytorch3d\n",
|
||||
"else:\n",
|
||||
" !pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable'"
|
||||
" need_pytorch3d=False\n",
|
||||
" try:\n",
|
||||
" import pytorch3d\n",
|
||||
" except ModuleNotFoundError:\n",
|
||||
" need_pytorch3d=True\n",
|
||||
" if need_pytorch3d:\n",
|
||||
" !curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz\n",
|
||||
" !tar xzf 1.10.0.tar.gz\n",
|
||||
" os.environ[\"CUB_HOME\"] = os.getcwd() + \"/cub-1.10.0\"\n",
|
||||
" !pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable'"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -41,12 +41,22 @@
|
||||
|
||||
|
||||
get_ipython().system('pip install torch torchvision')
|
||||
import os
|
||||
import sys
|
||||
import torch
|
||||
if torch.__version__=='1.6.0+cu101' and sys.platform.startswith('linux'):
|
||||
get_ipython().system('pip install pytorch3d')
|
||||
else:
|
||||
get_ipython().system("pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable'")
|
||||
need_pytorch3d=False
|
||||
try:
|
||||
import pytorch3d
|
||||
except ModuleNotFoundError:
|
||||
need_pytorch3d=True
|
||||
if need_pytorch3d:
|
||||
get_ipython().system('curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz')
|
||||
get_ipython().system('tar xzf 1.10.0.tar.gz')
|
||||
os.environ["CUB_HOME"] = os.getcwd() + "/cub-1.10.0"
|
||||
get_ipython().system("pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable'")
|
||||
|
||||
|
||||
# In[ ]:
|
||||
|
||||
@@ -68,12 +68,22 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install torch torchvision\n",
|
||||
"import os\n",
|
||||
"import sys\n",
|
||||
"import torch\n",
|
||||
"if torch.__version__=='1.6.0+cu101' and sys.platform.startswith('linux'):\n",
|
||||
" !pip install pytorch3d\n",
|
||||
"else:\n",
|
||||
" !pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable'"
|
||||
" need_pytorch3d=False\n",
|
||||
" try:\n",
|
||||
" import pytorch3d\n",
|
||||
" except ModuleNotFoundError:\n",
|
||||
" need_pytorch3d=True\n",
|
||||
" if need_pytorch3d:\n",
|
||||
" !curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz\n",
|
||||
" !tar xzf 1.10.0.tar.gz\n",
|
||||
" os.environ[\"CUB_HOME\"] = os.getcwd() + \"/cub-1.10.0\"\n",
|
||||
" !pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable'"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -350,8 +360,8 @@
|
||||
" self.device = meshes.device\n",
|
||||
" self.renderer = renderer\n",
|
||||
" \n",
|
||||
" # Get the silhouette of the reference RGB image by finding all the non zero values. \n",
|
||||
" image_ref = torch.from_numpy((image_ref[..., :3].max(-1) != 0).astype(np.float32))\n",
|
||||
" # Get the silhouette of the reference RGB image by finding all non-white pixel values. \n",
|
||||
" image_ref = torch.from_numpy((image_ref[..., :3].max(-1) != 1).astype(np.float32))\n",
|
||||
" self.register_buffer('image_ref', image_ref)\n",
|
||||
" \n",
|
||||
" # Create an optimizable parameter for the x, y, z position of the camera. \n",
|
||||
|
||||
@@ -28,12 +28,22 @@
|
||||
|
||||
|
||||
get_ipython().system('pip install torch torchvision')
|
||||
import os
|
||||
import sys
|
||||
import torch
|
||||
if torch.__version__=='1.6.0+cu101' and sys.platform.startswith('linux'):
|
||||
get_ipython().system('pip install pytorch3d')
|
||||
else:
|
||||
get_ipython().system("pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable'")
|
||||
need_pytorch3d=False
|
||||
try:
|
||||
import pytorch3d
|
||||
except ModuleNotFoundError:
|
||||
need_pytorch3d=True
|
||||
if need_pytorch3d:
|
||||
get_ipython().system('curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz')
|
||||
get_ipython().system('tar xzf 1.10.0.tar.gz')
|
||||
os.environ["CUB_HOME"] = os.getcwd() + "/cub-1.10.0"
|
||||
get_ipython().system("pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable'")
|
||||
|
||||
|
||||
# In[ ]:
|
||||
@@ -212,8 +222,8 @@ class Model(nn.Module):
|
||||
self.device = meshes.device
|
||||
self.renderer = renderer
|
||||
|
||||
- # Get the silhouette of the reference RGB image by finding all the non zero values.
- image_ref = torch.from_numpy((image_ref[..., :3].max(-1) != 0).astype(np.float32))
+ # Get the silhouette of the reference RGB image by finding all non-white pixel values.
+ image_ref = torch.from_numpy((image_ref[..., :3].max(-1) != 1).astype(np.float32))
|
||||
self.register_buffer('image_ref', image_ref)
|
||||
|
||||
# Create an optimizable parameter for the x, y, z position of the camera.
|
||||
|
||||
@@ -43,12 +43,22 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install torch torchvision\n",
|
||||
"import os\n",
|
||||
"import sys\n",
|
||||
"import torch\n",
|
||||
"if torch.__version__=='1.6.0+cu101' and sys.platform.startswith('linux'):\n",
|
||||
" !pip install pytorch3d\n",
|
||||
"else:\n",
|
||||
" !pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable'"
|
||||
" need_pytorch3d=False\n",
|
||||
" try:\n",
|
||||
" import pytorch3d\n",
|
||||
" except ModuleNotFoundError:\n",
|
||||
" need_pytorch3d=True\n",
|
||||
" if need_pytorch3d:\n",
|
||||
" !curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz\n",
|
||||
" !tar xzf 1.10.0.tar.gz\n",
|
||||
" os.environ[\"CUB_HOME\"] = os.getcwd() + \"/cub-1.10.0\"\n",
|
||||
" !pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable'"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -23,12 +23,22 @@
|
||||
|
||||
|
||||
get_ipython().system('pip install torch torchvision')
|
||||
import os
|
||||
import sys
|
||||
import torch
|
||||
if torch.__version__=='1.6.0+cu101' and sys.platform.startswith('linux'):
|
||||
get_ipython().system('pip install pytorch3d')
|
||||
else:
|
||||
get_ipython().system("pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable'")
|
||||
need_pytorch3d=False
|
||||
try:
|
||||
import pytorch3d
|
||||
except ModuleNotFoundError:
|
||||
need_pytorch3d=True
|
||||
if need_pytorch3d:
|
||||
get_ipython().system('curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz')
|
||||
get_ipython().system('tar xzf 1.10.0.tar.gz')
|
||||
os.environ["CUB_HOME"] = os.getcwd() + "/cub-1.10.0"
|
||||
get_ipython().system("pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable'")
|
||||
|
||||
|
||||
# In[ ]:
|
||||
|
||||
@@ -82,12 +82,22 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install torch torchvision\n",
|
||||
"import os\n",
|
||||
"import sys\n",
|
||||
"import torch\n",
|
||||
"if torch.__version__=='1.6.0+cu101' and sys.platform.startswith('linux'):\n",
|
||||
" !pip install pytorch3d\n",
|
||||
"else:\n",
|
||||
" !pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable'"
|
||||
" need_pytorch3d=False\n",
|
||||
" try:\n",
|
||||
" import pytorch3d\n",
|
||||
" except ModuleNotFoundError:\n",
|
||||
" need_pytorch3d=True\n",
|
||||
" if need_pytorch3d:\n",
|
||||
" !curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz\n",
|
||||
" !tar xzf 1.10.0.tar.gz\n",
|
||||
" os.environ[\"CUB_HOME\"] = os.getcwd() + \"/cub-1.10.0\"\n",
|
||||
" !pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable'"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -40,12 +40,22 @@
|
||||
|
||||
|
||||
get_ipython().system('pip install torch torchvision')
|
||||
import os
|
||||
import sys
|
||||
import torch
|
||||
if torch.__version__=='1.6.0+cu101' and sys.platform.startswith('linux'):
|
||||
get_ipython().system('pip install pytorch3d')
|
||||
else:
|
||||
get_ipython().system("pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable'")
|
||||
need_pytorch3d=False
|
||||
try:
|
||||
import pytorch3d
|
||||
except ModuleNotFoundError:
|
||||
need_pytorch3d=True
|
||||
if need_pytorch3d:
|
||||
get_ipython().system('curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz')
|
||||
get_ipython().system('tar xzf 1.10.0.tar.gz')
|
||||
os.environ["CUB_HOME"] = os.getcwd() + "/cub-1.10.0"
|
||||
get_ipython().system("pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable'")
|
||||
|
||||
|
||||
# In[ ]:
|
||||
|
||||
@@ -60,12 +60,22 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install torch torchvision\n",
|
||||
"import os\n",
|
||||
"import sys\n",
|
||||
"import torch\n",
|
||||
"if torch.__version__=='1.6.0+cu101' and sys.platform.startswith('linux'):\n",
|
||||
" !pip install pytorch3d\n",
|
||||
"else:\n",
|
||||
" !pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable'"
|
||||
" need_pytorch3d=False\n",
|
||||
" try:\n",
|
||||
" import pytorch3d\n",
|
||||
" except ModuleNotFoundError:\n",
|
||||
" need_pytorch3d=True\n",
|
||||
" if need_pytorch3d:\n",
|
||||
" !curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz\n",
|
||||
" !tar xzf 1.10.0.tar.gz\n",
|
||||
" os.environ[\"CUB_HOME\"] = os.getcwd() + \"/cub-1.10.0\"\n",
|
||||
" !pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable'"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -23,12 +23,22 @@
|
||||
|
||||
|
||||
get_ipython().system('pip install torch torchvision')
|
||||
import os
|
||||
import sys
|
||||
import torch
|
||||
if torch.__version__=='1.6.0+cu101' and sys.platform.startswith('linux'):
|
||||
get_ipython().system('pip install pytorch3d')
|
||||
else:
|
||||
get_ipython().system("pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable'")
|
||||
need_pytorch3d=False
|
||||
try:
|
||||
import pytorch3d
|
||||
except ModuleNotFoundError:
|
||||
need_pytorch3d=True
|
||||
if need_pytorch3d:
|
||||
get_ipython().system('curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz')
|
||||
get_ipython().system('tar xzf 1.10.0.tar.gz')
|
||||
os.environ["CUB_HOME"] = os.getcwd() + "/cub-1.10.0"
|
||||
get_ipython().system("pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable'")
|
||||
|
||||
|
||||
# In[ ]:
|
||||
|
||||
files/render_colored_points.ipynb (new file, 480 lines added)
@@ -0,0 +1,480 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Render a colored point cloud\n",
|
||||
"\n",
|
||||
"This tutorial shows how to:\n",
|
||||
"- set up a renderer \n",
|
||||
"- render the point cloud \n",
|
||||
"- vary the rendering settings such as compositing and camera position"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Import modules"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"If `torch`, `torchvision` and `pytorch3d` are not installed, run the following cell:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install torch torchvision\n",
|
||||
"import os\n",
|
||||
"import sys\n",
|
||||
"import torch\n",
|
||||
"if torch.__version__=='1.6.0+cu101' and sys.platform.startswith('linux'):\n",
|
||||
" !pip install pytorch3d\n",
|
||||
"else:\n",
|
||||
" need_pytorch3d=False\n",
|
||||
" try:\n",
|
||||
" import pytorch3d\n",
|
||||
" except ModuleNotFoundError:\n",
|
||||
" need_pytorch3d=True\n",
|
||||
" if need_pytorch3d:\n",
|
||||
" !curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz\n",
|
||||
" !tar xzf 1.10.0.tar.gz\n",
|
||||
" os.environ[\"CUB_HOME\"] = os.getcwd() + \"/cub-1.10.0\"\n",
|
||||
" !pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable'"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"import torch\n",
|
||||
"import torch.nn.functional as F\n",
|
||||
"import matplotlib.pyplot as plt\n",
|
||||
"from skimage.io import imread\n",
|
||||
"\n",
|
||||
"# Util function for loading point clouds|\n",
|
||||
"import numpy as np\n",
|
||||
"\n",
|
||||
"# Data structures and functions for rendering\n",
|
||||
"from pytorch3d.structures import Pointclouds\n",
|
||||
"from pytorch3d.vis.plotly_vis import AxisArgs, plot_batch_individually, plot_scene\n",
|
||||
"from pytorch3d.renderer import (\n",
|
||||
" look_at_view_transform,\n",
|
||||
" FoVOrthographicCameras, \n",
|
||||
" PointsRasterizationSettings,\n",
|
||||
" PointsRenderer,\n",
|
||||
" PulsarPointsRenderer,\n",
|
||||
" PointsRasterizer,\n",
|
||||
" AlphaCompositor,\n",
|
||||
" NormWeightedCompositor\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Load a point cloud and corresponding colors\n",
|
||||
"\n",
|
||||
"Load and create a **Point Cloud** object. \n",
|
||||
"\n",
|
||||
"**Pointclouds** is a unique datastructure provided in PyTorch3D for working with batches of point clouds of different sizes. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"If running this notebook using **Google Colab**, run the following cell to fetch the pointcloud data and save it at the path `data/PittsburghBridge`:\n",
|
||||
"If running locally, the data is already available at the correct path. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!mkdir -p data/PittsburghBridge\n",
|
||||
"!wget -P data/PittsburghBridge https://dl.fbaipublicfiles.com/pytorch3d/data/PittsburghBridge/pointcloud.npz"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Setup\n",
|
||||
"if torch.cuda.is_available():\n",
|
||||
" device = torch.device(\"cuda:0\")\n",
|
||||
" torch.cuda.set_device(device)\n",
|
||||
"else:\n",
|
||||
" device = torch.device(\"cpu\")\n",
|
||||
"\n",
|
||||
"# Set paths\n",
|
||||
"DATA_DIR = \"./data\"\n",
|
||||
"obj_filename = os.path.join(DATA_DIR, \"PittsburghBridge/pointcloud.npz\")\n",
|
||||
"\n",
|
||||
"# Load point cloud\n",
|
||||
"pointcloud = np.load(obj_filename)\n",
|
||||
"verts = torch.Tensor(pointcloud['verts']).to(device)\n",
|
||||
" \n",
|
||||
"rgb = torch.Tensor(pointcloud['rgb']).to(device)\n",
|
||||
"\n",
|
||||
"point_cloud = Pointclouds(points=[verts], features=[rgb])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Create a renderer\n",
|
||||
"\n",
|
||||
"A renderer in PyTorch3D is composed of a **rasterizer** and a **shader** which each have a number of subcomponents such as a **camera** (orthgraphic/perspective). Here we initialize some of these components and use default values for the rest.\n",
|
||||
"\n",
|
||||
"In this example we will first create a **renderer** which uses an **orthographic camera**, and applies **alpha compositing**. Then we learn how to vary different components using the modular API. \n",
|
||||
"\n",
|
||||
"[1] <a href=\"https://arxiv.org/abs/1912.08804\">SynSin: End to end View Synthesis from a Single Image.</a> Olivia Wiles, Georgia Gkioxari, Richard Szeliski, Justin Johnson. CVPR 2020."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Initialize a camera.\n",
|
||||
"R, T = look_at_view_transform(20, 10, 0)\n",
|
||||
"cameras = FoVOrthographicCameras(device=device, R=R, T=T, znear=0.01)\n",
|
||||
"\n",
|
||||
"# Define the settings for rasterization and shading. Here we set the output image to be of size\n",
|
||||
"# 512x512. As we are rendering images for visualization purposes only we will set faces_per_pixel=1\n",
|
||||
"# and blur_radius=0.0. Refer to raster_points.py for explanations of these parameters. \n",
|
||||
"raster_settings = PointsRasterizationSettings(\n",
|
||||
" image_size=512, \n",
|
||||
" radius = 0.003,\n",
|
||||
" points_per_pixel = 10\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Create a points renderer by compositing points using an alpha compositor (nearer points\n",
|
||||
"# are weighted more heavily). See [1] for an explanation.\n",
|
||||
"rasterizer = PointsRasterizer(cameras=cameras, raster_settings=raster_settings)\n",
|
||||
"renderer = PointsRenderer(\n",
|
||||
" rasterizer=rasterizer,\n",
|
||||
" compositor=AlphaCompositor()\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"images = renderer(point_cloud)\n",
|
||||
"plt.figure(figsize=(10, 10))\n",
|
||||
"plt.imshow(images[0, ..., :3].cpu().numpy())\n",
|
||||
"plt.grid(\"off\")\n",
|
||||
"plt.axis(\"off\");"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We will now modify the **renderer** to use **alpha compositing** with a set background color. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"renderer = PointsRenderer(\n",
|
||||
" rasterizer=rasterizer,\n",
|
||||
" # Pass in background_color to the alpha compositor, setting the background color \n",
|
||||
" # to the 3 item tuple, representing rgb on a scale of 0 -> 1, in this case blue\n",
|
||||
" compositor=AlphaCompositor(background_color=(0, 0, 1))\n",
|
||||
")\n",
|
||||
"images = renderer(point_cloud)\n",
|
||||
"\n",
|
||||
"plt.figure(figsize=(10, 10))\n",
|
||||
"plt.imshow(images[0, ..., :3].cpu().numpy())\n",
|
||||
"plt.grid(\"off\")\n",
|
||||
"plt.axis(\"off\");"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"In this example we will first create a **renderer** which uses an **orthographic camera**, and applies **weighted compositing**. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Initialize a camera.\n",
|
||||
"R, T = look_at_view_transform(20, 10, 0)\n",
|
||||
"cameras = FoVOrthographicCameras(device=device, R=R, T=T, znear=0.01)\n",
|
||||
"\n",
|
||||
"# Define the settings for rasterization and shading. Here we set the output image to be of size\n",
|
||||
"# 512x512. As we are rendering images for visualization purposes only we will set faces_per_pixel=1\n",
|
||||
"# and blur_radius=0.0. Refer to rasterize_points.py for explanations of these parameters. \n",
|
||||
"raster_settings = PointsRasterizationSettings(\n",
|
||||
" image_size=512, \n",
|
||||
" radius = 0.003,\n",
|
||||
" points_per_pixel = 10\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Create a points renderer by compositing points using an weighted compositor (3D points are\n",
|
||||
"# weighted according to their distance to a pixel and accumulated using a weighted sum)\n",
|
||||
"renderer = PointsRenderer(\n",
|
||||
" rasterizer=PointsRasterizer(cameras=cameras, raster_settings=raster_settings),\n",
|
||||
" compositor=NormWeightedCompositor()\n",
|
||||
")\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"images = renderer(point_cloud)\n",
|
||||
"plt.figure(figsize=(10, 10))\n",
|
||||
"plt.imshow(images[0, ..., :3].cpu().numpy())\n",
|
||||
"plt.grid(\"off\")\n",
|
||||
"plt.axis(\"off\");"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We will now modify the **renderer** to use **weighted compositing** with a set background color. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"renderer = PointsRenderer(\n",
|
||||
" rasterizer=PointsRasterizer(cameras=cameras, raster_settings=raster_settings),\n",
|
||||
" # Pass in background_color to the norm weighted compositor, setting the background color \n",
|
||||
" # to the 3 item tuple, representing rgb on a scale of 0 -> 1, in this case red\n",
|
||||
" compositor=NormWeightedCompositor(background_color=(1,0,0))\n",
|
||||
")\n",
|
||||
"images = renderer(point_cloud)\n",
|
||||
"plt.figure(figsize=(10, 10))\n",
|
||||
"plt.imshow(images[0, ..., :3].cpu().numpy())\n",
|
||||
"plt.grid(\"off\")\n",
|
||||
"plt.axis(\"off\");"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Using the pulsar backend\n",
|
||||
"\n",
|
||||
"Switching to the pulsar backend is easy! The pulsar backend has a compositor built-in, so the `compositor` argument is not required when creating it (a warning will be displayed if you provide it nevertheless). It pre-allocates memory on the rendering device, that's why it needs the `n_channels` at construction time.\n",
|
||||
"\n",
|
||||
"All parameters for the renderer forward function are batch-wise except the background color (in this example, `gamma`) and you have to provide as many values as you have examples in your batch. The background color is optional and by default set to all zeros. You can find a detailed explanation of how gamma influences the rendering function here in the paper [Fast Differentiable Raycasting for Neural Rendering using\n",
|
||||
"Sphere-based Representations](https://arxiv.org/pdf/2004.07484.pdf).\n",
|
||||
"\n",
|
||||
"You can also use the `native` backend for the pulsar backend which already provides access to point opacity. The native backend can be imported from `pytorch3d.renderer.points.pulsar`; you can find examples for this in the folder `docs/examples`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"renderer = PulsarPointsRenderer(\n",
|
||||
" rasterizer=PointsRasterizer(cameras=cameras, raster_settings=raster_settings),\n",
|
||||
" n_channels=4\n",
|
||||
").to(device)\n",
|
||||
"\n",
|
||||
"images = renderer(point_cloud, gamma=(1e-4,),\n",
|
||||
" bg_col=torch.tensor([0.0, 1.0, 0.0, 1.0], dtype=torch.float32, device=device))\n",
|
||||
"plt.figure(figsize=(10, 10))\n",
|
||||
"plt.imshow(images[0, ..., :3].cpu().numpy())\n",
|
||||
"plt.grid(\"off\")\n",
|
||||
"plt.axis(\"off\");"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### View pointclouds in Plotly figures\n",
|
||||
"\n",
|
||||
"Here we use the PyTorch3D function `plot_scene` to render the pointcloud in a Plotly figure. `plot_scene` returns a plotly figure with trace and subplots defined by the input."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"plot_scene({\n",
|
||||
" \"Pointcloud\": {\n",
|
||||
" \"person\": point_cloud\n",
|
||||
" }\n",
|
||||
"})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We will now render a batch of pointclouds. The first pointcloud is the same as above, and the second is all-black and offset by 2 in all dimensions so we can see them on the same plot. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"point_cloud_batch = Pointclouds(points=[verts, verts + 2], features=[rgb, torch.zeros_like(rgb)])\n",
|
||||
"# render both in the same plot in different traces\n",
|
||||
"fig = plot_scene({\n",
|
||||
" \"Pointcloud\": {\n",
|
||||
" \"person\": point_cloud_batch[0],\n",
|
||||
" \"person2\": point_cloud_batch[1]\n",
|
||||
" }\n",
|
||||
"})\n",
|
||||
"fig.show()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# render both in the same plot in one trace\n",
|
||||
"fig = plot_scene({\n",
|
||||
" \"Pointcloud\": {\n",
|
||||
" \"2 people\": point_cloud_batch\n",
|
||||
" }\n",
|
||||
"})\n",
|
||||
"fig.show()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"For batches, we can also use `plot_batch_individually` to avoid constructing the scene dictionary ourselves."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# render both in 1 row in different subplots\n",
|
||||
"fig2 = plot_batch_individually(point_cloud_batch, ncols=2)\n",
|
||||
"fig2.show()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# modify the plotly figure height and width\n",
|
||||
"fig2.update_layout(height=500, width=500)\n",
|
||||
"fig2.show()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can also modify the axis arguments and axis backgrounds for either function, and title our plots in `plot_batch_individually`."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"fig3 = plot_batch_individually(\n",
|
||||
" point_cloud_batch, \n",
|
||||
" xaxis={\"backgroundcolor\":\"rgb(200, 200, 230)\"},\n",
|
||||
" yaxis={\"backgroundcolor\":\"rgb(230, 200, 200)\"},\n",
|
||||
" zaxis={\"backgroundcolor\":\"rgb(200, 230, 200)\"}, \n",
|
||||
" subplot_titles=[\"Pointcloud1\", \"Pointcloud2\"], # this should have a title for each subplot, titles can be \"\"\n",
|
||||
" axis_args=AxisArgs(showgrid=True))\n",
|
||||
"fig3.show()"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"bento_stylesheets": {
|
||||
"bento/extensions/flow/main.css": true,
|
||||
"bento/extensions/kernel_selector/main.css": true,
|
||||
"bento/extensions/kernel_ui/main.css": true,
|
||||
"bento/extensions/new_kernel/main.css": true,
|
||||
"bento/extensions/system_usage/main.css": true,
|
||||
"bento/extensions/theme/main.css": true
|
||||
},
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.6.8"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
files/render_colored_points.py (new file, 325 lines added)
@@ -0,0 +1,325 @@
|
||||
#!/usr/bin/env python
|
||||
# coding: utf-8
|
||||
|
||||
# In[ ]:
|
||||
|
||||
|
||||
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
|
||||
|
||||
|
||||
# # Render a colored point cloud
|
||||
#
|
||||
# This tutorial shows how to:
|
||||
# - set up a renderer
|
||||
# - render the point cloud
|
||||
# - vary the rendering settings such as compositing and camera position
|
||||
|
||||
# ## Import modules
|
||||
|
||||
# If `torch`, `torchvision` and `pytorch3d` are not installed, run the following cell:
|
||||
|
||||
# In[ ]:
|
||||
|
||||
|
||||
get_ipython().system('pip install torch torchvision')
|
||||
import os
|
||||
import sys
|
||||
import torch
|
||||
if torch.__version__=='1.6.0+cu101' and sys.platform.startswith('linux'):
|
||||
get_ipython().system('pip install pytorch3d')
|
||||
else:
|
||||
need_pytorch3d=False
|
||||
try:
|
||||
import pytorch3d
|
||||
except ModuleNotFoundError:
|
||||
need_pytorch3d=True
|
||||
if need_pytorch3d:
|
||||
get_ipython().system('curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz')
|
||||
get_ipython().system('tar xzf 1.10.0.tar.gz')
|
||||
os.environ["CUB_HOME"] = os.getcwd() + "/cub-1.10.0"
|
||||
get_ipython().system("pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable'")
|
||||
|
||||
|
||||
# In[ ]:
|
||||
|
||||
|
||||
import os
|
||||
import torch
|
||||
import torch.nn.functional as F
|
||||
import matplotlib.pyplot as plt
|
||||
from skimage.io import imread
|
||||
|
||||
# Util function for loading point clouds
|
||||
import numpy as np
|
||||
|
||||
# Data structures and functions for rendering
|
||||
from pytorch3d.structures import Pointclouds
|
||||
from pytorch3d.vis.plotly_vis import AxisArgs, plot_batch_individually, plot_scene
|
||||
from pytorch3d.renderer import (
|
||||
look_at_view_transform,
|
||||
FoVOrthographicCameras,
|
||||
PointsRasterizationSettings,
|
||||
PointsRenderer,
|
||||
PulsarPointsRenderer,
|
||||
PointsRasterizer,
|
||||
AlphaCompositor,
|
||||
NormWeightedCompositor
|
||||
)
|
||||
|
||||
|
||||
# ### Load a point cloud and corresponding colors
|
||||
#
|
||||
# Load and create a **Point Cloud** object.
|
||||
#
|
||||
# **Pointclouds** is a unique data structure provided in PyTorch3D for working with batches of point clouds of different sizes.
|
||||
|
||||
# If running this notebook using **Google Colab**, run the following cell to fetch the pointcloud data and save it at the path `data/PittsburghBridge`:
|
||||
# If running locally, the data is already available at the correct path.
|
||||
|
||||
# In[ ]:
|
||||
|
||||
|
||||
get_ipython().system('mkdir -p data/PittsburghBridge')
|
||||
get_ipython().system('wget -P data/PittsburghBridge https://dl.fbaipublicfiles.com/pytorch3d/data/PittsburghBridge/pointcloud.npz')
|
||||
|
||||
|
||||
# In[ ]:
|
||||
|
||||
|
||||
# Setup
|
||||
if torch.cuda.is_available():
|
||||
device = torch.device("cuda:0")
|
||||
torch.cuda.set_device(device)
|
||||
else:
|
||||
device = torch.device("cpu")
|
||||
|
||||
# Set paths
|
||||
DATA_DIR = "./data"
|
||||
obj_filename = os.path.join(DATA_DIR, "PittsburghBridge/pointcloud.npz")
|
||||
|
||||
# Load point cloud
|
||||
pointcloud = np.load(obj_filename)
|
||||
verts = torch.Tensor(pointcloud['verts']).to(device)
|
||||
|
||||
rgb = torch.Tensor(pointcloud['rgb']).to(device)
|
||||
|
||||
point_cloud = Pointclouds(points=[verts], features=[rgb])
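# Optional sanity check of the batch structure (a minimal sketch, not needed for the
# rest of the tutorial): Pointclouds holds a batch of clouds of possibly different
# sizes and exposes padded views of the underlying tensors.
print(point_cloud.num_points_per_cloud())   # one entry per cloud in the batch
print(point_cloud.points_padded().shape)    # (batch_size, max_num_points, 3)
print(point_cloud.features_padded().shape)  # (batch_size, max_num_points, 3), RGB here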
|
||||
|
||||
|
||||
# ## Create a renderer
|
||||
#
|
||||
# A renderer in PyTorch3D is composed of a **rasterizer** and a **shader**, each of which has a number of subcomponents such as a **camera** (orthographic/perspective). Here we initialize some of these components and use default values for the rest.
|
||||
#
|
||||
# In this example we will first create a **renderer** which uses an **orthographic camera**, and applies **alpha compositing**. Then we learn how to vary different components using the modular API.
|
||||
#
|
||||
# [1] <a href="https://arxiv.org/abs/1912.08804">SynSin: End to end View Synthesis from a Single Image.</a> Olivia Wiles, Georgia Gkioxari, Richard Szeliski, Justin Johnson. CVPR 2020.
|
||||
|
||||
# In[ ]:
|
||||
|
||||
|
||||
# Initialize a camera.
|
||||
R, T = look_at_view_transform(20, 10, 0)
|
||||
cameras = FoVOrthographicCameras(device=device, R=R, T=T, znear=0.01)
|
||||
|
||||
# Define the settings for rasterization and shading. Here we set the output image to be of size
|
||||
# 512x512. As we are rendering images for visualization purposes only we will set a small
# point radius and points_per_pixel=10. Refer to rasterize_points.py for explanations of these parameters.
|
||||
raster_settings = PointsRasterizationSettings(
|
||||
image_size=512,
|
||||
radius = 0.003,
|
||||
points_per_pixel = 10
|
||||
)
|
||||
|
||||
|
||||
# Create a points renderer by compositing points using an alpha compositor (nearer points
|
||||
# are weighted more heavily). See [1] for an explanation.
|
||||
rasterizer = PointsRasterizer(cameras=cameras, raster_settings=raster_settings)
|
||||
renderer = PointsRenderer(
|
||||
rasterizer=rasterizer,
|
||||
compositor=AlphaCompositor()
|
||||
)
|
||||
|
||||
|
||||
# In[ ]:
|
||||
|
||||
|
||||
images = renderer(point_cloud)
|
||||
plt.figure(figsize=(10, 10))
|
||||
plt.imshow(images[0, ..., :3].cpu().numpy())
|
||||
plt.grid("off")
|
||||
plt.axis("off");
|
||||
|
||||
|
||||
# We will now modify the **renderer** to use **alpha compositing** with a set background color.
|
||||
|
||||
# In[ ]:
|
||||
|
||||
|
||||
renderer = PointsRenderer(
|
||||
rasterizer=rasterizer,
|
||||
# Pass in background_color to the alpha compositor, setting the background color
|
||||
# to the 3 item tuple, representing rgb on a scale of 0 -> 1, in this case blue
|
||||
compositor=AlphaCompositor(background_color=(0, 0, 1))
|
||||
)
|
||||
images = renderer(point_cloud)
|
||||
|
||||
plt.figure(figsize=(10, 10))
|
||||
plt.imshow(images[0, ..., :3].cpu().numpy())
|
||||
plt.grid("off")
|
||||
plt.axis("off");
|
||||
|
||||
|
||||
# We will now create a **renderer** which uses an **orthographic camera** and applies **weighted compositing**.
|
||||
|
||||
# In[ ]:
|
||||
|
||||
|
||||
# Initialize a camera.
|
||||
R, T = look_at_view_transform(20, 10, 0)
|
||||
cameras = FoVOrthographicCameras(device=device, R=R, T=T, znear=0.01)
|
||||
|
||||
# Define the settings for rasterization and shading. Here we set the output image to be of size
|
||||
# 512x512. As we are rendering images for visualization purposes only we will set a small
# point radius and points_per_pixel=10. Refer to rasterize_points.py for explanations of these parameters.
|
||||
raster_settings = PointsRasterizationSettings(
|
||||
image_size=512,
|
||||
radius = 0.003,
|
||||
points_per_pixel = 10
|
||||
)
|
||||
|
||||
|
||||
# Create a points renderer by compositing points using a norm-weighted compositor (3D points are
|
||||
# weighted according to their distance to a pixel and accumulated using a weighted sum)
|
||||
renderer = PointsRenderer(
|
||||
rasterizer=PointsRasterizer(cameras=cameras, raster_settings=raster_settings),
|
||||
compositor=NormWeightedCompositor()
|
||||
)
|
||||
|
||||
|
||||
# In[ ]:
|
||||
|
||||
|
||||
images = renderer(point_cloud)
|
||||
plt.figure(figsize=(10, 10))
|
||||
plt.imshow(images[0, ..., :3].cpu().numpy())
|
||||
plt.grid("off")
|
||||
plt.axis("off");
|
||||
|
||||
|
||||
# We will now modify the **renderer** to use **weighted compositing** with a set background color.
|
||||
|
||||
# In[ ]:
|
||||
|
||||
|
||||
renderer = PointsRenderer(
|
||||
rasterizer=PointsRasterizer(cameras=cameras, raster_settings=raster_settings),
|
||||
# Pass in background_color to the norm weighted compositor, setting the background color
|
||||
# to the 3 item tuple, representing rgb on a scale of 0 -> 1, in this case red
|
||||
compositor=NormWeightedCompositor(background_color=(1,0,0))
|
||||
)
|
||||
images = renderer(point_cloud)
|
||||
plt.figure(figsize=(10, 10))
|
||||
plt.imshow(images[0, ..., :3].cpu().numpy())
|
||||
plt.grid("off")
|
||||
plt.axis("off");
|
||||
|
||||
|
||||
# ## Using the pulsar backend
|
||||
#
|
||||
# Switching to the pulsar backend is easy! The pulsar backend has a compositor built-in, so the `compositor` argument is not required when creating it (a warning will be displayed if you provide one anyway). It pre-allocates memory on the rendering device, which is why it needs `n_channels` at construction time.
|
||||
#
|
||||
# All parameters for the renderer forward function are batch-wise (in this example, `gamma`), except for the background color, so you have to provide as many values as you have examples in your batch. The background color is optional and by default set to all zeros. You can find a detailed explanation of how gamma influences the rendering function in the paper [Fast Differentiable Raycasting for Neural Rendering using
|
||||
# Sphere-based Representations](https://arxiv.org/pdf/2004.07484.pdf).
|
||||
#
|
||||
# You can also use the `native` backend for the pulsar backend which already provides access to point opacity. The native backend can be imported from `pytorch3d.renderer.points.pulsar`; you can find examples for this in the folder `docs/examples`.
|
||||
|
||||
# In[ ]:
|
||||
|
||||
|
||||
renderer = PulsarPointsRenderer(
|
||||
rasterizer=PointsRasterizer(cameras=cameras, raster_settings=raster_settings),
|
||||
n_channels=4
|
||||
).to(device)
|
||||
|
||||
images = renderer(point_cloud, gamma=(1e-4,),
|
||||
bg_col=torch.tensor([0.0, 1.0, 0.0, 1.0], dtype=torch.float32, device=device))
|
||||
plt.figure(figsize=(10, 10))
|
||||
plt.imshow(images[0, ..., :3].cpu().numpy())
|
||||
plt.grid("off")
|
||||
plt.axis("off");
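# As noted above, gamma is batch-wise: it needs exactly one value per point cloud in
# the batch, while bg_col is a single tensor. A minimal sketch (same cloud, different
# gamma value) to see how the blending parameter affects the result:
images_soft = renderer(point_cloud, gamma=(1e-1,),
                       bg_col=torch.tensor([1.0, 1.0, 1.0, 1.0], dtype=torch.float32, device=device))
plt.figure(figsize=(10, 10))
plt.imshow(images_soft[0, ..., :3].cpu().numpy())
plt.axis("off");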
|
||||
|
||||
|
||||
# ### View pointclouds in Plotly figures
|
||||
#
|
||||
# Here we use the PyTorch3D function `plot_scene` to render the pointcloud in a Plotly figure. `plot_scene` returns a plotly figure with trace and subplots defined by the input.
|
||||
|
||||
# In[ ]:
|
||||
|
||||
|
||||
plot_scene({
|
||||
"Pointcloud": {
|
||||
"person": point_cloud
|
||||
}
|
||||
})
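# When this file is run as a plain Python script (rather than in a notebook), the figure
# returned by plot_scene is not displayed automatically; capture it and call show()
# explicitly, as the later cells do:
fig = plot_scene({
    "Pointcloud": {
        "person": point_cloud
    }
})
fig.show()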
|
||||
|
||||
|
||||
# We will now render a batch of pointclouds. The first pointcloud is the same as above, and the second is all-black and offset by 2 in all dimensions so we can see them on the same plot.
|
||||
|
||||
# In[ ]:
|
||||
|
||||
|
||||
point_cloud_batch = Pointclouds(points=[verts, verts + 2], features=[rgb, torch.zeros_like(rgb)])
|
||||
# render both in the same plot in different traces
|
||||
fig = plot_scene({
|
||||
"Pointcloud": {
|
||||
"person": point_cloud_batch[0],
|
||||
"person2": point_cloud_batch[1]
|
||||
}
|
||||
})
|
||||
fig.show()
|
||||
|
||||
|
||||
# In[ ]:
|
||||
|
||||
|
||||
# render both in the same plot in one trace
|
||||
fig = plot_scene({
|
||||
"Pointcloud": {
|
||||
"2 people": point_cloud_batch
|
||||
}
|
||||
})
|
||||
fig.show()
|
||||
|
||||
|
||||
# For batches, we can also use `plot_batch_individually` to avoid constructing the scene dictionary ourselves.
|
||||
|
||||
# In[ ]:
|
||||
|
||||
|
||||
# render both in 1 row in different subplots
|
||||
fig2 = plot_batch_individually(point_cloud_batch, ncols=2)
|
||||
fig2.show()
|
||||
|
||||
|
||||
# In[ ]:
|
||||
|
||||
|
||||
# modify the plotly figure height and width
|
||||
fig2.update_layout(height=500, width=500)
|
||||
fig2.show()
|
||||
|
||||
|
||||
# We can also modify the axis arguments and axis backgrounds for either function, and title our plots in `plot_batch_individually`.
|
||||
|
||||
# In[ ]:
|
||||
|
||||
|
||||
fig3 = plot_batch_individually(
|
||||
point_cloud_batch,
|
||||
xaxis={"backgroundcolor":"rgb(200, 200, 230)"},
|
||||
yaxis={"backgroundcolor":"rgb(230, 200, 200)"},
|
||||
zaxis={"backgroundcolor":"rgb(200, 230, 200)"},
|
||||
subplot_titles=["Pointcloud1", "Pointcloud2"], # this should have a title for each subplot, titles can be ""
|
||||
axis_args=AxisArgs(showgrid=True))
|
||||
fig3.show()
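# The tutorial intro also mentions varying the camera position; a minimal sketch of
# re-rendering the original point cloud from a different viewpoint, reusing the
# renderer components defined above:
R, T = look_at_view_transform(dist=20, elev=30, azim=90)
cameras = FoVOrthographicCameras(device=device, R=R, T=T, znear=0.01)
renderer = PointsRenderer(
    rasterizer=PointsRasterizer(cameras=cameras, raster_settings=raster_settings),
    compositor=AlphaCompositor()
)
images = renderer(point_cloud)
plt.figure(figsize=(10, 10))
plt.imshow(images[0, ..., :3].cpu().numpy())
plt.axis("off");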
|
||||
|
||||
files/render_densepose.ipynb (new file, 432 lines added)
@@ -0,0 +1,432 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Render DensePose \n",
|
||||
"\n",
|
||||
"DensePose refers to dense human pose representation: https://github.com/facebookresearch/DensePose. \n",
|
||||
"In this tutorial, we provide an example of using DensePose data in PyTorch3D.\n",
|
||||
"\n",
|
||||
"This tutorial shows how to:\n",
|
||||
"- load a mesh and textures from densepose `.mat` and `.pkl` files\n",
|
||||
"- set up a renderer \n",
|
||||
"- render the mesh \n",
|
||||
"- vary the rendering settings such as lighting and camera position"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"colab_type": "text",
|
||||
"id": "Bnj3THhzfBLf"
|
||||
},
|
||||
"source": [
|
||||
"## Import modules"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"If torch, torchvision and PyTorch3D are not installed, run the following cell:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!pip install torch torchvision\n",
|
||||
"import os\n",
|
||||
"import sys\n",
|
||||
"import torch\n",
|
||||
"if torch.__version__=='1.6.0+cu101' and sys.platform.startswith('linux'):\n",
|
||||
" !pip install pytorch3d\n",
|
||||
"else:\n",
|
||||
" need_pytorch3d=False\n",
|
||||
" try:\n",
|
||||
" import pytorch3d\n",
|
||||
" except ModuleNotFoundError:\n",
|
||||
" need_pytorch3d=True\n",
|
||||
" if need_pytorch3d:\n",
|
||||
" !curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz\n",
|
||||
" !tar xzf 1.10.0.tar.gz\n",
|
||||
" os.environ[\"CUB_HOME\"] = os.getcwd() + \"/cub-1.10.0\"\n",
|
||||
" !pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable'"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# We also install chumpy as it is needed to load the SMPL model pickle file.\n",
|
||||
"!pip install chumpy"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"import torch\n",
|
||||
"import matplotlib.pyplot as plt\n",
|
||||
"from skimage.io import imread\n",
|
||||
"import numpy as np\n",
|
||||
"\n",
|
||||
"# libraries for reading data from files\n",
|
||||
"from scipy.io import loadmat\n",
|
||||
"from pytorch3d.io.utils import _read_image\n",
|
||||
"import pickle\n",
|
||||
"\n",
|
||||
"# Data structures and functions for rendering\n",
|
||||
"from pytorch3d.structures import Meshes\n",
|
||||
"from pytorch3d.renderer import (\n",
|
||||
" look_at_view_transform,\n",
|
||||
" FoVPerspectiveCameras, \n",
|
||||
" PointLights, \n",
|
||||
" DirectionalLights, \n",
|
||||
" Materials, \n",
|
||||
" RasterizationSettings, \n",
|
||||
" MeshRenderer, \n",
|
||||
" MeshRasterizer, \n",
|
||||
" SoftPhongShader,\n",
|
||||
" TexturesUV\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# add path for demo utils functions \n",
|
||||
"import sys\n",
|
||||
"import os\n",
|
||||
"sys.path.append(os.path.abspath(''))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Load the SMPL model\n",
|
||||
"\n",
|
||||
"#### Download the SMPL model\n",
|
||||
"- Go to http://smpl.is.tue.mpg.de/downloads and sign up.\n",
|
||||
"- Download SMPL for Python Users and unzip.\n",
|
||||
"- Copy the file male template file **'models/basicModel_m_lbs_10_207_0_v1.0.0.pkl'** to the data/DensePose/ folder.\n",
|
||||
" - rename the file to **'smpl_model.pkl'** or rename the string where it's commented below\n",
|
||||
" \n",
|
||||
"If running this notebook using Google Colab, run the following cell to fetch the texture and UV values and save it at the correct path."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Texture image\n",
|
||||
"!wget -P data/DensePose https://raw.githubusercontent.com/facebookresearch/DensePose/master/DensePoseData/demo_data/texture_from_SURREAL.png\n",
|
||||
"\n",
|
||||
"# UV_processed.mat\n",
|
||||
"!wget https://dl.fbaipublicfiles.com/densepose/densepose_uv_data.tar.gz\n",
|
||||
"!tar xvf densepose_uv_data.tar.gz -C data/DensePose\n",
|
||||
"!rm densepose_uv_data.tar.gz"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Load our texture UV data and our SMPL data, with some processing to correct data values and format."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Setup\n",
|
||||
"if torch.cuda.is_available():\n",
|
||||
" device = torch.device(\"cuda:0\")\n",
|
||||
" torch.cuda.set_device(device)\n",
|
||||
"else:\n",
|
||||
" device = torch.device(\"cpu\")\n",
|
||||
" \n",
|
||||
"# Set paths\n",
|
||||
"DATA_DIR = \"./data\"\n",
|
||||
"data_filename = os.path.join(DATA_DIR, \"DensePose/UV_Processed.mat\")\n",
|
||||
"tex_filename = os.path.join(DATA_DIR,\"DensePose/texture_from_SURREAL.png\")\n",
|
||||
"# rename your .pkl file or change this string\n",
|
||||
"verts_filename = os.path.join(DATA_DIR, \"DensePose/smpl_model.pkl\")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# Load SMPL and texture data\n",
|
||||
"with open(verts_filename, 'rb') as f:\n",
|
||||
" data = pickle.load(f, encoding='latin1') \n",
|
||||
" v_template = torch.Tensor(data['v_template']).to(device) # (6890, 3)\n",
|
||||
"ALP_UV = loadmat(data_filename)\n",
|
||||
"tex = torch.from_numpy(_read_image(file_name=tex_filename, format='RGB') / 255. ).unsqueeze(0).to(device)\n",
|
||||
"\n",
|
||||
"verts = torch.from_numpy((ALP_UV[\"All_vertices\"]).astype(int)).squeeze().to(device) # (7829, 1)\n",
|
||||
"U = torch.Tensor(ALP_UV['All_U_norm']).to(device) # (7829, 1)\n",
|
||||
"V = torch.Tensor(ALP_UV['All_V_norm']).to(device) # (7829, 1)\n",
|
||||
"faces = torch.from_numpy((ALP_UV['All_Faces'] - 1).astype(int)).to(device) # (13774, 3)\n",
|
||||
"face_indices = torch.Tensor(ALP_UV['All_FaceIndices']).squeeze()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Display the texture image\n",
|
||||
"plt.figure(figsize=(10, 10))\n",
|
||||
"plt.imshow(tex.squeeze(0).cpu())\n",
|
||||
"plt.grid(\"off\");\n",
|
||||
"plt.axis(\"off\");"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"In DensePose, the body mesh is split into 24 parts. In the texture image, we can see the 24 parts are separated out into individual (200, 200) images per body part. The convention in DensePose is that each face in the mesh is associated with a body part (given by the face_indices tensor above). The vertex UV values (in the range [0, 1]) for each face are specific to the (200, 200) size texture map for the part of the body that the mesh face corresponds to. We cannot use them directly with the entire texture map. We have to offset the vertex UV values depending on what body part the associated face corresponds to."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Map each face to a (u, v) offset\n",
|
||||
"offset_per_part = {}\n",
|
||||
"already_offset = set()\n",
|
||||
"cols, rows = 4, 6\n",
|
||||
"for i, u in enumerate(np.linspace(0, 1, cols, endpoint=False)):\n",
|
||||
" for j, v in enumerate(np.linspace(0, 1, rows, endpoint=False)):\n",
|
||||
" part = rows * i + j + 1 # parts are 1-indexed in face_indices\n",
|
||||
" offset_per_part[part] = (u, v)\n",
|
||||
"\n",
|
||||
"# iterate over faces and offset the corresponding vertex u and v values\n",
|
||||
"for i in range(len(faces)):\n",
|
||||
" face_vert_idxs = faces[i]\n",
|
||||
" part = face_indices[i]\n",
|
||||
" offset_u, offset_v = offset_per_part[int(part.item())]\n",
|
||||
" \n",
|
||||
" for vert_idx in face_vert_idxs: \n",
|
||||
" # vertices are reused, but we don't want to offset multiple times\n",
|
||||
" if vert_idx.item() not in already_offset:\n",
|
||||
" # offset u value\n",
|
||||
" U[vert_idx] = U[vert_idx] / cols + offset_u\n",
|
||||
" # offset v value\n",
|
||||
" # this also flips each part locally, as each part is upside down\n",
|
||||
" V[vert_idx] = (1 - V[vert_idx]) / rows + offset_v\n",
|
||||
" # add vertex to our set tracking offsetted vertices\n",
|
||||
" already_offset.add(vert_idx.item())\n",
|
||||
"\n",
|
||||
"# invert V values\n",
|
||||
"U_norm, V_norm = U, 1 - V"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# create our verts_uv values\n",
|
||||
"verts_uv = torch.cat([U_norm[None],V_norm[None]], dim=2) # (1, 7829, 2)\n",
|
||||
"\n",
|
||||
"# There are 6890 xyz vertex coordinates but 7829 vertex uv coordinates. \n",
|
||||
"# This is because the same vertex can be shared by multiple faces where each face may correspond to a different body part. \n",
|
||||
"# Therefore when initializing the Meshes class,\n",
|
||||
"# we need to map each of the vertices referenced by the DensePose faces (in verts, which is the \"All_vertices\" field)\n",
|
||||
"# to the correct xyz coordinate in the SMPL template mesh.\n",
|
||||
"v_template_extended = torch.stack(list(map(lambda vert: v_template[vert-1], verts))).unsqueeze(0).to(device) # (1, 7829, 3)\n",
|
||||
"\n",
|
||||
"# add a batch dimension to faces\n",
|
||||
"faces = faces.unsqueeze(0)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Create our textured mesh \n",
|
||||
"\n",
|
||||
"**Meshes** is a unique datastructure provided in PyTorch3D for working with batches of meshes of different sizes.\n",
|
||||
"\n",
|
||||
"**TexturesUV** is an auxillary datastructure for storing vertex uv and texture maps for meshes."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"texture = TexturesUV(maps=tex, faces_uvs=faces, verts_uvs=verts_uv)\n",
|
||||
"mesh = Meshes(v_template_extended, faces, texture)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Create a renderer"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Initialize a camera.\n",
|
||||
"# World coordinates +Y up, +X left and +Z in.\n",
|
||||
"R, T = look_at_view_transform(2.7, 0, 0) \n",
|
||||
"cameras = FoVPerspectiveCameras(device=device, R=R, T=T)\n",
|
||||
"\n",
|
||||
"# Define the settings for rasterization and shading. Here we set the output image to be of size\n",
|
||||
"# 512x512. As we are rendering images for visualization purposes only we will set faces_per_pixel=1\n",
|
||||
"# and blur_radius=0.0. \n",
|
||||
"raster_settings = RasterizationSettings(\n",
|
||||
" image_size=512, \n",
|
||||
" blur_radius=0.0, \n",
|
||||
" faces_per_pixel=1, \n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Place a point light in front of the person. \n",
|
||||
"lights = PointLights(device=device, location=[[0.0, 0.0, 2.0]])\n",
|
||||
"\n",
|
||||
"# Create a phong renderer by composing a rasterizer and a shader. The textured phong shader will \n",
|
||||
"# interpolate the texture uv coordinates for each vertex, sample from a texture image and \n",
|
||||
"# apply the Phong lighting model\n",
|
||||
"renderer = MeshRenderer(\n",
|
||||
" rasterizer=MeshRasterizer(\n",
|
||||
" cameras=cameras, \n",
|
||||
" raster_settings=raster_settings\n",
|
||||
" ),\n",
|
||||
" shader=SoftPhongShader(\n",
|
||||
" device=device, \n",
|
||||
" cameras=cameras,\n",
|
||||
" lights=lights\n",
|
||||
" )\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Render the textured mesh we created from the SMPL model and texture map."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"images = renderer(mesh)\n",
|
||||
"plt.figure(figsize=(10, 10))\n",
|
||||
"plt.imshow(images[0, ..., :3].cpu().numpy())\n",
|
||||
"plt.grid(\"off\");\n",
|
||||
"plt.axis(\"off\");"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Different view and lighting of the body\n",
|
||||
"\n",
|
||||
"We can also change many other settings in the rendering pipeline. Here we:\n",
|
||||
"\n",
|
||||
"- change the **viewing angle** of the camera\n",
|
||||
"- change the **position** of the point light"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Rotate the person by increasing the elevation and azimuth angles to view the back of the person from above. \n",
|
||||
"R, T = look_at_view_transform(2.7, 10, 180)\n",
|
||||
"cameras = FoVPerspectiveCameras(device=device, R=R, T=T)\n",
|
||||
"\n",
|
||||
"# Move the light location so the light is shining on the person's back. \n",
|
||||
"lights.location = torch.tensor([[2.0, 2.0, -2.0]], device=device)\n",
|
||||
"\n",
|
||||
"# Re render the mesh, passing in keyword arguments for the modified components.\n",
|
||||
"images = renderer(mesh, lights=lights, cameras=cameras)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"plt.figure(figsize=(10, 10))\n",
|
||||
"plt.imshow(images[0, ..., :3].cpu().numpy())\n",
|
||||
"plt.grid(\"off\");\n",
|
||||
"plt.axis(\"off\");"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Conclusion\n",
|
||||
"In this tutorial, we've learned how to construct a **textured mesh** from **DensePose model and uv data**, as well as initialize a **Renderer** and change the viewing angle and lighting of our rendered mesh."
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"bento_stylesheets": {
|
||||
"bento/extensions/flow/main.css": true,
|
||||
"bento/extensions/kernel_selector/main.css": true,
|
||||
"bento/extensions/kernel_ui/main.css": true,
|
||||
"bento/extensions/new_kernel/main.css": true,
|
||||
"bento/extensions/system_usage/main.css": true,
|
||||
"bento/extensions/theme/main.css": true
|
||||
},
|
||||
"kernelspec": {
|
||||
"display_name": "pytorch3d_etc (local)",
|
||||
"language": "python",
|
||||
"name": "pytorch3d_etc_local"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.7.5+"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
files/render_densepose.py (new file, 301 lines added)
@@ -0,0 +1,301 @@
|
||||
#!/usr/bin/env python
# coding: utf-8

# In[ ]:


# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.


# # Render DensePose
#
# DensePose refers to dense human pose representation: https://github.com/facebookresearch/DensePose.
# In this tutorial, we provide an example of using DensePose data in PyTorch3D.
#
# This tutorial shows how to:
# - load a mesh and textures from DensePose `.mat` and `.pkl` files
# - set up a renderer
# - render the mesh
# - vary the rendering settings such as lighting and camera position

# ## Import modules

# If torch, torchvision and PyTorch3D are not installed, run the following cell:

# In[ ]:


get_ipython().system('pip install torch torchvision')
import os
import sys
import torch
if torch.__version__=='1.6.0+cu101' and sys.platform.startswith('linux'):
    get_ipython().system('pip install pytorch3d')
else:
    need_pytorch3d=False
    try:
        import pytorch3d
    except ModuleNotFoundError:
        need_pytorch3d=True
    if need_pytorch3d:
        get_ipython().system('curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz')
        get_ipython().system('tar xzf 1.10.0.tar.gz')
        os.environ["CUB_HOME"] = os.getcwd() + "/cub-1.10.0"
        get_ipython().system("pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable'")


# In[ ]:


# We also install chumpy, as it is needed to load the SMPL model pickle file.
get_ipython().system('pip install chumpy')


# In[ ]:


import os
import torch
import matplotlib.pyplot as plt
from skimage.io import imread
import numpy as np

# libraries for reading data from files
from scipy.io import loadmat
from pytorch3d.io.utils import _read_image
import pickle

# Data structures and functions for rendering
from pytorch3d.structures import Meshes
from pytorch3d.renderer import (
    look_at_view_transform,
    FoVPerspectiveCameras,
    PointLights,
    DirectionalLights,
    Materials,
    RasterizationSettings,
    MeshRenderer,
    MeshRasterizer,
    SoftPhongShader,
    TexturesUV
)

# add path for demo utils functions
import sys
import os
sys.path.append(os.path.abspath(''))


# ## Load the SMPL model
#
# #### Download the SMPL model
# - Go to http://smpl.is.tue.mpg.de/downloads and sign up.
# - Download SMPL for Python Users and unzip.
# - Copy the male template file **'models/basicModel_m_lbs_10_207_0_v1.0.0.pkl'** to the data/DensePose/ folder.
# - Rename the file to **'smpl_model.pkl'**, or change the path string where it is set below (a sketch of this copy/rename step follows).
#
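# A minimal, hedged sketch of the manual copy/rename step above (not part of the
# original tutorial). The source path below is an assumption -- adjust it to
# wherever you unzipped the "SMPL for Python Users" download.
import os
import shutil

smpl_src = "SMPL_python_v.1.0.0/models/basicModel_m_lbs_10_207_0_v1.0.0.pkl"  # assumed unzip location
smpl_dst = "data/DensePose/smpl_model.pkl"
if os.path.exists(smpl_src) and not os.path.exists(smpl_dst):
    os.makedirs("data/DensePose", exist_ok=True)
    shutil.copy(smpl_src, smpl_dst)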
# If running this notebook using Google Colab, run the following cell to fetch the texture and UV values and save them at the correct paths.

# In[ ]:


# Texture image
get_ipython().system('wget -P data/DensePose https://raw.githubusercontent.com/facebookresearch/DensePose/master/DensePoseData/demo_data/texture_from_SURREAL.png')

# UV_processed.mat
get_ipython().system('wget https://dl.fbaipublicfiles.com/densepose/densepose_uv_data.tar.gz')
get_ipython().system('tar xvf densepose_uv_data.tar.gz -C data/DensePose')
get_ipython().system('rm densepose_uv_data.tar.gz')


# Load our texture UV data and our SMPL data, with some processing to correct data values and format.

# In[ ]:


# Setup
if torch.cuda.is_available():
    device = torch.device("cuda:0")
    torch.cuda.set_device(device)
else:
    device = torch.device("cpu")

# Set paths
DATA_DIR = "./data"
data_filename = os.path.join(DATA_DIR, "DensePose/UV_Processed.mat")
tex_filename = os.path.join(DATA_DIR, "DensePose/texture_from_SURREAL.png")
# rename your .pkl file or change this string
verts_filename = os.path.join(DATA_DIR, "DensePose/smpl_model.pkl")

# Load SMPL and texture data
with open(verts_filename, 'rb') as f:
    data = pickle.load(f, encoding='latin1')
    v_template = torch.Tensor(data['v_template']).to(device)  # (6890, 3)
ALP_UV = loadmat(data_filename)
tex = torch.from_numpy(_read_image(file_name=tex_filename, format='RGB') / 255.).unsqueeze(0).to(device)

verts = torch.from_numpy((ALP_UV["All_vertices"]).astype(int)).squeeze().to(device)  # (7829,)
U = torch.Tensor(ALP_UV['All_U_norm']).to(device)  # (7829, 1)
V = torch.Tensor(ALP_UV['All_V_norm']).to(device)  # (7829, 1)
faces = torch.from_numpy((ALP_UV['All_Faces'] - 1).astype(int)).to(device)  # (13774, 3)
face_indices = torch.Tensor(ALP_UV['All_FaceIndices']).squeeze()


# In[ ]:


# Display the texture image
plt.figure(figsize=(10, 10))
plt.imshow(tex.squeeze(0).cpu())
plt.grid("off");
plt.axis("off");


# In DensePose, the body mesh is split into 24 parts. In the texture image, we can see the 24 parts are separated out into individual (200, 200) images per body part. The convention in DensePose is that each face in the mesh is associated with a body part (given by the face_indices tensor above). The vertex UV values (in the range [0, 1]) for each face are specific to the (200, 200) size texture map for the part of the body that the mesh face corresponds to. We cannot use them directly with the entire texture map. We have to offset the vertex UV values depending on what body part the associated face corresponds to.
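# A hedged sanity-check sketch (not part of the original tutorial): reproduce the
# atlas offset for a single part index using the same 4 x 6 grid convention as the
# loop in the next cell. Part 7 sits in the second column of the grid, so its tile
# starts at (u, v) = (0.25, 0.0), and a local u of 0.5 maps to 0.5 / 4 + 0.25 = 0.375
# in the full texture atlas. The *_demo names are illustrative only.
cols_demo, rows_demo = 4, 6
part_demo = 7  # parts are 1-indexed
i_demo, j_demo = (part_demo - 1) // rows_demo, (part_demo - 1) % rows_demo
offset_u_demo, offset_v_demo = i_demo / cols_demo, j_demo / rows_demo
print(offset_u_demo, offset_v_demo)      # 0.25 0.0
print(0.5 / cols_demo + offset_u_demo)   # 0.375: local u -> atlas u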

# In[ ]:


# Map each face to a (u, v) offset
offset_per_part = {}
already_offset = set()
cols, rows = 4, 6
for i, u in enumerate(np.linspace(0, 1, cols, endpoint=False)):
    for j, v in enumerate(np.linspace(0, 1, rows, endpoint=False)):
        part = rows * i + j + 1  # parts are 1-indexed in face_indices
        offset_per_part[part] = (u, v)

# iterate over faces and offset the corresponding vertex u and v values
for i in range(len(faces)):
    face_vert_idxs = faces[i]
    part = face_indices[i]
    offset_u, offset_v = offset_per_part[int(part.item())]

    for vert_idx in face_vert_idxs:
        # vertices are reused, but we don't want to offset multiple times
        if vert_idx.item() not in already_offset:
            # offset u value
            U[vert_idx] = U[vert_idx] / cols + offset_u
            # offset v value
            # this also flips each part locally, as each part is upside down
            V[vert_idx] = (1 - V[vert_idx]) / rows + offset_v
            # add vertex to the set tracking already-offset vertices
            already_offset.add(vert_idx.item())

# invert V values
U_norm, V_norm = U, 1 - V


# In[ ]:


# create our verts_uv values
verts_uv = torch.cat([U_norm[None], V_norm[None]], dim=2)  # (1, 7829, 2)

# There are 6890 xyz vertex coordinates but 7829 vertex uv coordinates.
# This is because the same vertex can be shared by multiple faces where each face may correspond to a different body part.
# Therefore when initializing the Meshes class,
# we need to map each of the vertices referenced by the DensePose faces (in verts, which is the "All_vertices" field)
# to the correct xyz coordinate in the SMPL template mesh.
v_template_extended = torch.stack(list(map(lambda vert: v_template[vert - 1], verts))).unsqueeze(0).to(device)  # (1, 7829, 3)

# add a batch dimension to faces
faces = faces.unsqueeze(0)
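# Hedged alternative (not from the original tutorial): because "verts" holds
# 1-indexed SMPL vertex ids, the same mapping can be written with advanced
# indexing instead of a Python-level loop; both produce a (1, 7829, 3) tensor.
v_template_extended_alt = v_template[verts - 1].unsqueeze(0)
assert torch.allclose(v_template_extended_alt, v_template_extended)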


# ### Create our textured mesh
#
# **Meshes** is a unique datastructure provided in PyTorch3D for working with batches of meshes of different sizes.
#
# **TexturesUV** is an auxiliary datastructure for storing vertex uv and texture maps for meshes.

# In[ ]:


texture = TexturesUV(maps=tex, faces_uvs=faces, verts_uvs=verts_uv)
mesh = Meshes(v_template_extended, faces, texture)


# ## Create a renderer

# In[ ]:


# Initialize a camera.
# World coordinates +Y up, +X left and +Z in.
R, T = look_at_view_transform(2.7, 0, 0)
cameras = FoVPerspectiveCameras(device=device, R=R, T=T)

# Define the settings for rasterization and shading. Here we set the output image to be of size
# 512x512. As we are rendering images for visualization purposes only we will set faces_per_pixel=1
# and blur_radius=0.0.
raster_settings = RasterizationSettings(
    image_size=512,
    blur_radius=0.0,
    faces_per_pixel=1,
)

# Place a point light in front of the person.
lights = PointLights(device=device, location=[[0.0, 0.0, 2.0]])

# Create a Phong renderer by composing a rasterizer and a shader. The textured Phong shader will
# interpolate the texture uv coordinates for each vertex, sample from a texture image and
# apply the Phong lighting model.
renderer = MeshRenderer(
    rasterizer=MeshRasterizer(
        cameras=cameras,
        raster_settings=raster_settings
    ),
    shader=SoftPhongShader(
        device=device,
        cameras=cameras,
        lights=lights
    )
)


# Render the textured mesh we created from the SMPL model and texture map.

# In[ ]:


images = renderer(mesh)
plt.figure(figsize=(10, 10))
plt.imshow(images[0, ..., :3].cpu().numpy())
plt.grid("off");
plt.axis("off");


# ### Different view and lighting of the body
#
# We can also change many other settings in the rendering pipeline. Here we:
#
# - change the **viewing angle** of the camera
# - change the **position** of the point light

# In[ ]:


# Rotate the person by increasing the elevation and azimuth angles to view the back of the person from above.
R, T = look_at_view_transform(2.7, 10, 180)
cameras = FoVPerspectiveCameras(device=device, R=R, T=T)

# Move the light location so the light is shining on the person's back.
lights.location = torch.tensor([[2.0, 2.0, -2.0]], device=device)

# Re-render the mesh, passing in keyword arguments for the modified components.
images = renderer(mesh, lights=lights, cameras=cameras)


# In[ ]:


plt.figure(figsize=(10, 10))
plt.imshow(images[0, ..., :3].cpu().numpy())
plt.grid("off");
plt.axis("off");


# ## Conclusion
# In this tutorial, we've learned how to construct a **textured mesh** from **DensePose model and UV data**, as well as initialize a **Renderer** and change the viewing angle and lighting of our rendered mesh.
@@ -65,12 +65,22 @@
"outputs": [],
"source": [
"!pip install torch torchvision\n",
"import os\n",
"import sys\n",
"import torch\n",
"if torch.__version__=='1.6.0+cu101' and sys.platform.startswith('linux'):\n",
" !pip install pytorch3d\n",
"else:\n",
" !pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable'"
" need_pytorch3d=False\n",
" try:\n",
" import pytorch3d\n",
" except ModuleNotFoundError:\n",
" need_pytorch3d=True\n",
" if need_pytorch3d:\n",
" !curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz\n",
" !tar xzf 1.10.0.tar.gz\n",
" os.environ[\"CUB_HOME\"] = os.getcwd() + \"/cub-1.10.0\"\n",
" !pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable'"
]
},
{
@@ -93,6 +103,8 @@
"\n",
"# Data structures and functions for rendering\n",
"from pytorch3d.structures import Meshes\n",
"from pytorch3d.vis.plotly_vis import AxisArgs, plot_batch_individually, plot_scene\n",
"from pytorch3d.vis.texture_vis import texturesuv_image_matplotlib\n",
"from pytorch3d.renderer import (\n",
" look_at_view_transform,\n",
" FoVPerspectiveCameras, \n",
@@ -103,7 +115,8 @@
" MeshRenderer, \n",
" MeshRasterizer, \n",
" SoftPhongShader,\n",
" TexturesUV\n",
" TexturesUV,\n",
" TexturesVertex\n",
")\n",
"\n",
"# add path for demo utils functions \n",
@@ -234,8 +247,7 @@
"obj_filename = os.path.join(DATA_DIR, \"cow_mesh/cow.obj\")\n",
"\n",
"# Load obj file\n",
"mesh = load_objs_as_meshes([obj_filename], device=device)\n",
"texture_image=mesh.textures.maps_padded()"
"mesh = load_objs_as_meshes([obj_filename], device=device)"
]
},
{
@@ -263,9 +275,29 @@
"outputs": [],
"source": [
"plt.figure(figsize=(7,7))\n",
"texture_image=mesh.textures.maps_padded()\n",
"plt.imshow(texture_image.squeeze().cpu().numpy())\n",
"plt.grid(\"off\");\n",
"plt.axis('off');"
"plt.axis(\"off\");"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"PyTorch3D has a built-in way to view the texture map with matplotlib along with the points on the map corresponding to vertices. There is also a method, texturesuv_image_PIL, to get a similar image which can be saved to a file."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"plt.figure(figsize=(7,7))\n",
"texturesuv_image_matplotlib(mesh.textures, subsample=None)\n",
"plt.grid(\"off\");\n",
"plt.axis(\"off\");"
]
},
{
@@ -555,6 +587,185 @@
"image_grid(images.cpu().numpy(), rows=4, cols=5, rgb=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 7. Plotly visualization \n",
"If you only want to visualize a mesh, you don't really need to use a differentiable renderer - instead we support plotting of Meshes with plotly. For these Meshes, we use TexturesVertex to define a texture for the rendering.\n",
"`plot_scene` creates a Plotly figure with a trace for each Meshes object. "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"verts, faces_idx, _ = load_obj(obj_filename)\n",
"faces = faces_idx.verts_idx\n",
"\n",
"# Initialize each vertex to be white in color.\n",
"verts_rgb = torch.ones_like(verts)[None] # (1, V, 3)\n",
"textures = TexturesVertex(verts_features=verts_rgb.to(device))\n",
"\n",
"# Create a Meshes object\n",
"mesh = Meshes(\n",
" verts=[verts.to(device)], \n",
" faces=[faces.to(device)],\n",
" textures=textures\n",
")\n",
"\n",
"# Render the plotly figure\n",
"fig = plot_scene({\n",
" \"subplot1\": {\n",
" \"cow_mesh\": mesh\n",
" }\n",
"})\n",
"fig.show()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# use Plotly's default colors (no texture)\n",
"mesh = Meshes(\n",
" verts=[verts.to(device)], \n",
" faces=[faces.to(device)]\n",
")\n",
"\n",
"# Render the plotly figure\n",
"fig = plot_scene({\n",
" \"subplot1\": {\n",
" \"cow_mesh\": mesh\n",
" }\n",
"})\n",
"fig.show()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# create a batch of meshes, and offset one to prevent overlap\n",
"mesh_batch = Meshes(\n",
" verts=[verts.to(device), (verts + 2).to(device)], \n",
" faces=[faces.to(device), faces.to(device)]\n",
")\n",
"\n",
"# plot mesh batch in the same trace\n",
"fig = plot_scene({\n",
" \"subplot1\": {\n",
" \"cow_mesh_batch\": mesh_batch\n",
" }\n",
"})\n",
"fig.show()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# plot batch of meshes in different traces\n",
"fig = plot_scene({\n",
" \"subplot1\": {\n",
" \"cow_mesh1\": mesh_batch[0],\n",
" \"cow_mesh2\": mesh_batch[1]\n",
" }\n",
"})\n",
"fig.show()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# plot batch of meshes in different subplots\n",
"fig = plot_scene({\n",
" \"subplot1\": {\n",
" \"cow_mesh1\": mesh_batch[0]\n",
" },\n",
" \"subplot2\":{\n",
" \"cow_mesh2\": mesh_batch[1]\n",
" }\n",
"})\n",
"fig.show()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"For batches, we can also use `plot_batch_individually` to avoid constructing the scene dictionary ourselves."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# extend the batch to have 4 meshes\n",
"mesh_4 = mesh_batch.extend(2)\n",
"\n",
"# visualize the batch in different subplots, 2 per row\n",
"fig = plot_batch_individually(mesh_4)\n",
"# we can update the figure height and width\n",
"fig.update_layout(height=1000, width=500)\n",
"fig.show()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"We can also modify the axis arguments and axis backgrounds in both functions. "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"fig2 = plot_scene({\n",
" \"cow_plot1\": {\n",
" \"cows\": mesh_batch\n",
" }\n",
"},\n",
" xaxis={\"backgroundcolor\":\"rgb(200, 200, 230)\"},\n",
" yaxis={\"backgroundcolor\":\"rgb(230, 200, 200)\"},\n",
" zaxis={\"backgroundcolor\":\"rgb(200, 230, 200)\"}, \n",
" axis_args=AxisArgs(showgrid=True))\n",
"fig2.show()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"fig3 = plot_batch_individually(\n",
" mesh_4, \n",
" ncols=2,\n",
" subplot_titles = [\"cow1\", \"cow2\", \"cow3\", \"cow4\"], # customize subplot titles\n",
" xaxis={\"backgroundcolor\":\"rgb(200, 200, 230)\"},\n",
" yaxis={\"backgroundcolor\":\"rgb(230, 200, 200)\"},\n",
" zaxis={\"backgroundcolor\":\"rgb(200, 230, 200)\"}, \n",
" axis_args=AxisArgs(showgrid=True))\n",
"fig3.show()"
]
},
{
"cell_type": "markdown",
"metadata": {
@@ -562,15 +773,15 @@
"id": "t3qphI1ElUb5"
},
"source": [
"## 7. Conclusion\n",
|
||||
"In this tutorial we learnt how to **load** a textured mesh from an obj file, initialize a PyTorch3D datastructure called **Meshes**, set up an **Renderer** consisting of a **Rasterizer** and a **Shader**, and modify several components of the rendering pipeline. "
|
||||
"## 8. Conclusion\n",
|
||||
"In this tutorial we learnt how to **load** a textured mesh from an obj file, initialize a PyTorch3D datastructure called **Meshes**, set up an **Renderer** consisting of a **Rasterizer** and a **Shader**, and modify several components of the rendering pipeline. We also learned how to render Meshes in Plotly figures."
]
}
],
"metadata": {
"accelerator": "GPU",
"anp_metadata": {
"path": "fbsource/fbcode/vision/fair/pytorch3d/docs/tutorials/render_textured_meshes.ipynb"
"path": "notebooks/render_textured_meshes.ipynb"
},
"bento_stylesheets": {
"bento/extensions/flow/main.css": true,
@@ -588,9 +799,9 @@
"backup_notebook_id": "569222367081034"
},
"kernelspec": {
"display_name": "intro_to_cv",
"display_name": "pytorch3d_etc (local)",
"language": "python",
"name": "bento_kernel_intro_to_cv"
"name": "pytorch3d_etc_local"
},
"language_info": {
"codemirror_mode": {
@@ -24,12 +24,22 @@


get_ipython().system('pip install torch torchvision')
import os
import sys
import torch
if torch.__version__=='1.6.0+cu101' and sys.platform.startswith('linux'):
    get_ipython().system('pip install pytorch3d')
else:
    get_ipython().system("pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable'")
    need_pytorch3d=False
    try:
        import pytorch3d
    except ModuleNotFoundError:
        need_pytorch3d=True
    if need_pytorch3d:
        get_ipython().system('curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz')
        get_ipython().system('tar xzf 1.10.0.tar.gz')
        os.environ["CUB_HOME"] = os.getcwd() + "/cub-1.10.0"
        get_ipython().system("pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable'")


# In[ ]:

@@ -45,6 +55,8 @@ from pytorch3d.io import load_objs_as_meshes, load_obj

# Data structures and functions for rendering
from pytorch3d.structures import Meshes
from pytorch3d.vis.plotly_vis import AxisArgs, plot_batch_individually, plot_scene
from pytorch3d.vis.texture_vis import texturesuv_image_matplotlib
from pytorch3d.renderer import (
    look_at_view_transform,
    FoVPerspectiveCameras,
@@ -55,7 +67,8 @@ from pytorch3d.renderer import (
    MeshRenderer,
    MeshRasterizer,
    SoftPhongShader,
    TexturesUV
    TexturesUV,
    TexturesVertex
)

# add path for demo utils functions
@@ -119,7 +132,6 @@ obj_filename = os.path.join(DATA_DIR, "cow_mesh/cow.obj")

# Load obj file
mesh = load_objs_as_meshes([obj_filename], device=device)
texture_image=mesh.textures.maps_padded()


# #### Let's visualize the texture map

@@ -128,9 +140,21 @@ texture_image=mesh.textures.maps_padded()


plt.figure(figsize=(7,7))
texture_image=mesh.textures.maps_padded()
plt.imshow(texture_image.squeeze().cpu().numpy())
plt.grid("off");
plt.axis('off');
plt.axis("off");


# PyTorch3D has a built-in way to view the texture map with matplotlib along with the points on the map corresponding to vertices. There is also a method, texturesuv_image_PIL, to get a similar image which can be saved to a file.

# In[ ]:


plt.figure(figsize=(7,7))
texturesuv_image_matplotlib(mesh.textures, subsample=None)
plt.grid("off");
plt.axis("off");


# ## 2. Create a renderer

@@ -302,5 +326,145 @@ images = renderer(meshes, cameras=cameras, lights=lights)

image_grid(images.cpu().numpy(), rows=4, cols=5, rgb=True)


# ## 7. Conclusion
# In this tutorial we learned how to **load** a textured mesh from an obj file, initialize a PyTorch3D datastructure called **Meshes**, set up a **Renderer** consisting of a **Rasterizer** and a **Shader**, and modify several components of the rendering pipeline.
# ## 7. Plotly visualization
# If you only want to visualize a mesh, you don't really need to use a differentiable renderer - instead we support plotting of Meshes with plotly. For these Meshes, we use TexturesVertex to define a texture for the rendering.
# `plot_scene` creates a Plotly figure with a trace for each Meshes object.

# In[ ]:


verts, faces_idx, _ = load_obj(obj_filename)
faces = faces_idx.verts_idx

# Initialize each vertex to be white in color.
verts_rgb = torch.ones_like(verts)[None]  # (1, V, 3)
textures = TexturesVertex(verts_features=verts_rgb.to(device))

# Create a Meshes object
mesh = Meshes(
    verts=[verts.to(device)],
    faces=[faces.to(device)],
    textures=textures
)

# Render the plotly figure
fig = plot_scene({
    "subplot1": {
        "cow_mesh": mesh
    }
})
fig.show()


# In[ ]:


# use Plotly's default colors (no texture)
mesh = Meshes(
    verts=[verts.to(device)],
    faces=[faces.to(device)]
)

# Render the plotly figure
fig = plot_scene({
    "subplot1": {
        "cow_mesh": mesh
    }
})
fig.show()


# In[ ]:


# create a batch of meshes, and offset one to prevent overlap
mesh_batch = Meshes(
    verts=[verts.to(device), (verts + 2).to(device)],
    faces=[faces.to(device), faces.to(device)]
)

# plot mesh batch in the same trace
fig = plot_scene({
    "subplot1": {
        "cow_mesh_batch": mesh_batch
    }
})
fig.show()


# In[ ]:


# plot batch of meshes in different traces
fig = plot_scene({
    "subplot1": {
        "cow_mesh1": mesh_batch[0],
        "cow_mesh2": mesh_batch[1]
    }
})
fig.show()


# In[ ]:


# plot batch of meshes in different subplots
fig = plot_scene({
    "subplot1": {
        "cow_mesh1": mesh_batch[0]
    },
    "subplot2": {
        "cow_mesh2": mesh_batch[1]
    }
})
fig.show()


# For batches, we can also use `plot_batch_individually` to avoid constructing the scene dictionary ourselves.

# In[ ]:


# extend the batch to have 4 meshes
mesh_4 = mesh_batch.extend(2)

# visualize the batch in different subplots, 2 per row
fig = plot_batch_individually(mesh_4)
# we can update the figure height and width
fig.update_layout(height=1000, width=500)
fig.show()


# We can also modify the axis arguments and axis backgrounds in both functions.

# In[ ]:


fig2 = plot_scene({
    "cow_plot1": {
        "cows": mesh_batch
    }
},
    xaxis={"backgroundcolor":"rgb(200, 200, 230)"},
    yaxis={"backgroundcolor":"rgb(230, 200, 200)"},
    zaxis={"backgroundcolor":"rgb(200, 230, 200)"},
    axis_args=AxisArgs(showgrid=True))
fig2.show()


# In[ ]:


fig3 = plot_batch_individually(
    mesh_4,
    ncols=2,
    subplot_titles=["cow1", "cow2", "cow3", "cow4"],  # customize subplot titles
    xaxis={"backgroundcolor":"rgb(200, 200, 230)"},
    yaxis={"backgroundcolor":"rgb(230, 200, 200)"},
    zaxis={"backgroundcolor":"rgb(200, 230, 200)"},
    axis_args=AxisArgs(showgrid=True))
fig3.show()


# ## 8. Conclusion
# In this tutorial we learned how to **load** a textured mesh from an obj file, initialize a PyTorch3D datastructure called **Meshes**, set up a **Renderer** consisting of a **Rasterizer** and a **Shader**, and modify several components of the rendering pipeline. We also learned how to render Meshes in Plotly figures.