mirror of https://github.com/facebookresearch/pytorch3d.git
update for version 0.5.0
@@ -1,4 +1,4 @@
<!DOCTYPE html><html lang=""><head><meta charSet="utf-8"/><meta http-equiv="X-UA-Compatible" content="IE=edge"/><title>PyTorch3D · A library for deep learning with 3D data</title><meta name="viewport" content="width=device-width"/><meta name="generator" content="Docusaurus"/><meta name="description" content="A library for deep learning with 3D data"/><meta property="og:title" content="PyTorch3D · A library for deep learning with 3D data"/><meta property="og:type" content="website"/><meta property="og:url" content="https://pytorch3d.org/"/><meta property="og:description" content="A library for deep learning with 3D data"/><meta property="og:image" content="https://pytorch3d.org/img/pytorch3dlogoicon.svg"/><meta name="twitter:card" content="summary"/><meta name="twitter:image" content="https://pytorch3d.org/img/pytorch3dlogoicon.svg"/><link rel="shortcut icon" href="/img/pytorch3dfavicon.png"/><link rel="stylesheet" href="//cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/styles/default.min.css"/><script>
<!DOCTYPE html><html lang=""><head><meta charSet="utf-8"/><meta http-equiv="X-UA-Compatible" content="IE=edge"/><title>PyTorch3D · A library for deep learning with 3D data</title><meta name="viewport" content="width=device-width, initial-scale=1.0"/><meta name="generator" content="Docusaurus"/><meta name="description" content="A library for deep learning with 3D data"/><meta property="og:title" content="PyTorch3D · A library for deep learning with 3D data"/><meta property="og:type" content="website"/><meta property="og:url" content="https://pytorch3d.org/"/><meta property="og:description" content="A library for deep learning with 3D data"/><meta property="og:image" content="https://pytorch3d.org/img/pytorch3dlogoicon.svg"/><meta name="twitter:card" content="summary"/><meta name="twitter:image" content="https://pytorch3d.org/img/pytorch3dlogoicon.svg"/><link rel="shortcut icon" href="/img/pytorch3dfavicon.png"/><link rel="stylesheet" href="//cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/styles/default.min.css"/><script>
(function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
(i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
@@ -82,7 +82,8 @@
</div>
</div>
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div><div class="inner_cell">
</div>
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<h1 id="Fit-a-mesh-via-rendering">Fit a mesh via rendering<a class="anchor-link" href="#Fit-a-mesh-via-rendering">¶</a></h1><p>This tutorial shows how to:</p>
<ul>
@@ -95,16 +96,18 @@
</div>
</div>
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div><div class="inner_cell">
</div>
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<h2 id="0.-Install-and-Import-modules">0. Install and Import modules<a class="anchor-link" href="#0.-Install-and-Import-modules">¶</a></h2>
</div>
</div>
</div>
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div><div class="inner_cell">
</div>
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<p>If <code>torch</code>, <code>torchvision</code> and <code>pytorch3d</code> are not installed, run the following cell:</p>
<p>Ensure <code>torch</code> and <code>torchvision</code> are installed. If <code>pytorch3d</code> is not installed, install it using the following cell:</p>
</div>
</div>
</div>
@@ -113,19 +116,25 @@
<div class="prompt input_prompt">In [ ]:</div>
<div class="inner_cell">
<div class="input_area">
<div class="highlight hl-ipython3"><pre><span></span><span class="o">!</span>pip install torch torchvision
<span class="kn">import</span> <span class="nn">os</span>
<div class="highlight hl-ipython3"><pre><span></span><span class="kn">import</span> <span class="nn">os</span>
<span class="kn">import</span> <span class="nn">sys</span>
<span class="kn">import</span> <span class="nn">torch</span>
<span class="k">if</span> <span class="n">torch</span><span class="o">.</span><span class="n">__version__</span><span class="o">==</span><span class="s1">'1.6.0+cu101'</span> <span class="ow">and</span> <span class="n">sys</span><span class="o">.</span><span class="n">platform</span><span class="o">.</span><span class="n">startswith</span><span class="p">(</span><span class="s1">'linux'</span><span class="p">):</span>
<span class="o">!</span>pip install pytorch3d
<span class="k">else</span><span class="p">:</span>
<span class="n">need_pytorch3d</span><span class="o">=</span><span class="kc">False</span>
<span class="k">try</span><span class="p">:</span>
<span class="kn">import</span> <span class="nn">pytorch3d</span>
<span class="k">except</span> <span class="n">ModuleNotFoundError</span><span class="p">:</span>
<span class="n">need_pytorch3d</span><span class="o">=</span><span class="kc">True</span>
<span class="k">if</span> <span class="n">need_pytorch3d</span><span class="p">:</span>
<span class="n">need_pytorch3d</span><span class="o">=</span><span class="kc">False</span>
<span class="k">try</span><span class="p">:</span>
<span class="kn">import</span> <span class="nn">pytorch3d</span>
<span class="k">except</span> <span class="ne">ModuleNotFoundError</span><span class="p">:</span>
<span class="n">need_pytorch3d</span><span class="o">=</span><span class="kc">True</span>
<span class="k">if</span> <span class="n">need_pytorch3d</span><span class="p">:</span>
<span class="k">if</span> <span class="n">torch</span><span class="o">.</span><span class="n">__version__</span><span class="o">.</span><span class="n">startswith</span><span class="p">(</span><span class="s2">"1.9"</span><span class="p">)</span> <span class="ow">and</span> <span class="n">sys</span><span class="o">.</span><span class="n">platform</span><span class="o">.</span><span class="n">startswith</span><span class="p">(</span><span class="s2">"linux"</span><span class="p">):</span>
<span class="c1"># We try to install PyTorch3D via a released wheel.</span>
<span class="n">version_str</span><span class="o">=</span><span class="s2">""</span><span class="o">.</span><span class="n">join</span><span class="p">([</span>
<span class="sa">f</span><span class="s2">"py3</span><span class="si">{</span><span class="n">sys</span><span class="o">.</span><span class="n">version_info</span><span class="o">.</span><span class="n">minor</span><span class="si">}</span><span class="s2">_cu"</span><span class="p">,</span>
<span class="n">torch</span><span class="o">.</span><span class="n">version</span><span class="o">.</span><span class="n">cuda</span><span class="o">.</span><span class="n">replace</span><span class="p">(</span><span class="s2">"."</span><span class="p">,</span><span class="s2">""</span><span class="p">),</span>
<span class="sa">f</span><span class="s2">"_pyt</span><span class="si">{</span><span class="n">torch</span><span class="o">.</span><span class="n">__version__</span><span class="p">[</span><span class="mi">0</span><span class="p">:</span><span class="mi">5</span><span class="p">:</span><span class="mi">2</span><span class="p">]</span><span class="si">}</span><span class="s2">"</span>
<span class="p">])</span>
<span class="o">!</span>pip install pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/<span class="o">{</span>version_str<span class="o">}</span>/download.html
<span class="k">else</span><span class="p">:</span>
<span class="c1"># We try to install PyTorch3D from source.</span>
<span class="o">!</span>curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz
<span class="o">!</span>tar xzf <span class="m">1</span>.10.0.tar.gz
<span class="n">os</span><span class="o">.</span><span class="n">environ</span><span class="p">[</span><span class="s2">"CUB_HOME"</span><span class="p">]</span> <span class="o">=</span> <span class="n">os</span><span class="o">.</span><span class="n">getcwd</span><span class="p">()</span> <span class="o">+</span> <span class="s2">"/cub-1.10.0"</span>
@@ -143,16 +152,15 @@
<div class="highlight hl-ipython3"><pre><span></span><span class="kn">import</span> <span class="nn">os</span>
<span class="kn">import</span> <span class="nn">torch</span>
<span class="kn">import</span> <span class="nn">matplotlib.pyplot</span> <span class="k">as</span> <span class="nn">plt</span>
<span class="kn">from</span> <span class="nn">skimage.io</span> <span class="k">import</span> <span class="n">imread</span>
<span class="kn">from</span> <span class="nn">pytorch3d.utils</span> <span class="k">import</span> <span class="n">ico_sphere</span>
<span class="kn">from</span> <span class="nn">pytorch3d.utils</span> <span class="kn">import</span> <span class="n">ico_sphere</span>
<span class="kn">import</span> <span class="nn">numpy</span> <span class="k">as</span> <span class="nn">np</span>
<span class="kn">from</span> <span class="nn">tqdm.notebook</span> <span class="k">import</span> <span class="n">tqdm</span>
<span class="kn">from</span> <span class="nn">tqdm.notebook</span> <span class="kn">import</span> <span class="n">tqdm</span>
<span class="c1"># Util function for loading meshes</span>
<span class="kn">from</span> <span class="nn">pytorch3d.io</span> <span class="k">import</span> <span class="n">load_objs_as_meshes</span><span class="p">,</span> <span class="n">save_obj</span>
<span class="kn">from</span> <span class="nn">pytorch3d.io</span> <span class="kn">import</span> <span class="n">load_objs_as_meshes</span><span class="p">,</span> <span class="n">save_obj</span>
<span class="kn">from</span> <span class="nn">pytorch3d.loss</span> <span class="k">import</span> <span class="p">(</span>
<span class="kn">from</span> <span class="nn">pytorch3d.loss</span> <span class="kn">import</span> <span class="p">(</span>
<span class="n">chamfer_distance</span><span class="p">,</span>
<span class="n">mesh_edge_loss</span><span class="p">,</span>
<span class="n">mesh_laplacian_smoothing</span><span class="p">,</span>
@@ -160,8 +168,8 @@
<span class="p">)</span>
<span class="c1"># Data structures and functions for rendering</span>
<span class="kn">from</span> <span class="nn">pytorch3d.structures</span> <span class="k">import</span> <span class="n">Meshes</span>
<span class="kn">from</span> <span class="nn">pytorch3d.renderer</span> <span class="k">import</span> <span class="p">(</span>
<span class="kn">from</span> <span class="nn">pytorch3d.structures</span> <span class="kn">import</span> <span class="n">Meshes</span>
<span class="kn">from</span> <span class="nn">pytorch3d.renderer</span> <span class="kn">import</span> <span class="p">(</span>
<span class="n">look_at_view_transform</span><span class="p">,</span>
<span class="n">OpenGLPerspectiveCameras</span><span class="p">,</span>
<span class="n">PointLights</span><span class="p">,</span>
@@ -186,7 +194,8 @@
</div>
</div>
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div><div class="inner_cell">
</div>
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<p>If using <strong>Google Colab</strong>, fetch the utils file for plotting image grids:</p>
</div>
@@ -198,14 +207,15 @@
<div class="inner_cell">
<div class="input_area">
<div class="highlight hl-ipython3"><pre><span></span><span class="o">!</span>wget https://raw.githubusercontent.com/facebookresearch/pytorch3d/master/docs/tutorials/utils/plot_image_grid.py
<span class="kn">from</span> <span class="nn">plot_image_grid</span> <span class="k">import</span> <span class="n">image_grid</span>
<span class="kn">from</span> <span class="nn">plot_image_grid</span> <span class="kn">import</span> <span class="n">image_grid</span>
</pre></div>
</div>
</div>
</div>
</div>
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div><div class="inner_cell">
</div>
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<p>OR if running <strong>locally</strong> uncomment and run the following cell:</p>
</div>
@@ -223,17 +233,19 @@
</div>
</div>
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div><div class="inner_cell">
</div>
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<h3 id="1.-Load-a-mesh-and-texture-file">1. Load a mesh and texture file<a class="anchor-link" href="#1.-Load-a-mesh-and-texture-file">¶</a></h3><p>Load an <code>.obj</code> file and it's associated <code>.mtl</code> file and create a <strong>Textures</strong> and <strong>Meshes</strong> object.</p>
<h3 id="1.-Load-a-mesh-and-texture-file">1. Load a mesh and texture file<a class="anchor-link" href="#1.-Load-a-mesh-and-texture-file">¶</a></h3><p>Load an <code>.obj</code> file and its associated <code>.mtl</code> file and create a <strong>Textures</strong> and <strong>Meshes</strong> object.</p>
<p><strong>Meshes</strong> is a unique datastructure provided in PyTorch3D for working with batches of meshes of different sizes.</p>
<p><strong>TexturesVertex</strong> is an auxillary datastructure for storing vertex rgb texture information about meshes.</p>
<p><strong>TexturesVertex</strong> is an auxiliary datastructure for storing vertex rgb texture information about meshes.</p>
<p><strong>Meshes</strong> has several class methods which are used throughout the rendering pipeline.</p>
</div>
</div>
</div>
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div><div class="inner_cell">
</div>
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<p>If running this notebook using <strong>Google Colab</strong>, run the following cell to fetch the mesh obj and texture files and save them at the path <code>data/cow_mesh</code>:
If running locally, the data is already available at the correct path.</p>
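The cell that actually loads the cow mesh sits outside the hunks shown here. As a rough sketch of the step it performs (the device setup and data path are assumptions based on the tutorial text above):

import os
import torch
from pytorch3d.io import load_objs_as_meshes

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# load_objs_as_meshes reads the .obj plus its .mtl/texture image and
# returns a Meshes object with the texture attached.
obj_filename = os.path.join("./data", "cow_mesh/cow.obj")
mesh = load_objs_as_meshes([obj_filename], device=device)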
@@ -281,7 +293,7 @@ If running locally, the data is already available at the correct path.</p>
<span class="n">N</span> <span class="o">=</span> <span class="n">verts</span><span class="o">.</span><span class="n">shape</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span>
<span class="n">center</span> <span class="o">=</span> <span class="n">verts</span><span class="o">.</span><span class="n">mean</span><span class="p">(</span><span class="mi">0</span><span class="p">)</span>
<span class="n">scale</span> <span class="o">=</span> <span class="nb">max</span><span class="p">((</span><span class="n">verts</span> <span class="o">-</span> <span class="n">center</span><span class="p">)</span><span class="o">.</span><span class="n">abs</span><span class="p">()</span><span class="o">.</span><span class="n">max</span><span class="p">(</span><span class="mi">0</span><span class="p">)[</span><span class="mi">0</span><span class="p">])</span>
<span class="n">mesh</span><span class="o">.</span><span class="n">offset_verts_</span><span class="p">(</span><span class="o">-</span><span class="n">center</span><span class="o">.</span><span class="n">expand</span><span class="p">(</span><span class="n">N</span><span class="p">,</span> <span class="mi">3</span><span class="p">))</span>
<span class="n">mesh</span><span class="o">.</span><span class="n">offset_verts_</span><span class="p">(</span><span class="o">-</span><span class="n">center</span><span class="p">)</span>
<span class="n">mesh</span><span class="o">.</span><span class="n">scale_verts_</span><span class="p">((</span><span class="mf">1.0</span> <span class="o">/</span> <span class="nb">float</span><span class="p">(</span><span class="n">scale</span><span class="p">)));</span>
</pre></div>
</div>
@@ -289,7 +301,8 @@ If running locally, the data is already available at the correct path.</p>
</div>
</div>
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div><div class="inner_cell">
</div>
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<h2 id="2.-Dataset-Creation">2. Dataset Creation<a class="anchor-link" href="#2.-Dataset-Creation">¶</a></h2><p>We sample different camera positions that encode multiple viewpoints of the cow. We create a renderer with a shader that performs texture map interpolation. We render a synthetic dataset of images of the textured cow mesh from multiple viewpoints.</p>
</div>
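The sampling cell itself is not included in the hunks below. A minimal sketch of the viewpoint sampling this paragraph describes, assuming the device from the setup cell (num_views, dist and the linspace ranges are illustrative values):

import torch
from pytorch3d.renderer import OpenGLPerspectiveCameras, look_at_view_transform

# One camera per viewpoint, spaced around the object.
num_views = 20
elev = torch.linspace(0, 360, num_views)
azim = torch.linspace(-180, 180, num_views)
R, T = look_at_view_transform(dist=2.7, elev=elev, azim=azim)
cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)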
@@ -328,7 +341,7 @@ If running locally, the data is already available at the correct path.</p>
<span class="c1"># purposes only we will set faces_per_pixel=1 and blur_radius=0.0. Refer to </span>
<span class="c1"># rasterize_meshes.py for explanations of these parameters. We also leave </span>
<span class="c1"># bin_size and max_faces_per_bin to their default values of None, which sets </span>
<span class="c1"># their values using huristics and ensures that the faster coarse-to-fine </span>
<span class="c1"># their values using heuristics and ensures that the faster coarse-to-fine </span>
<span class="c1"># rasterization method is used. Refer to docs/notes/renderer.md for an </span>
<span class="c1"># explanation of the difference between naive and coarse-to-fine rasterization. </span>
<span class="n">raster_settings</span> <span class="o">=</span> <span class="n">RasterizationSettings</span><span class="p">(</span>
@@ -337,8 +350,8 @@ If running locally, the data is already available at the correct path.</p>
<span class="n">faces_per_pixel</span><span class="o">=</span><span class="mi">1</span><span class="p">,</span>
<span class="p">)</span>
<span class="c1"># Create a phong renderer by composing a rasterizer and a shader. The textured </span>
<span class="c1"># phong shader will interpolate the texture uv coordinates for each vertex, </span>
<span class="c1"># Create a Phong renderer by composing a rasterizer and a shader. The textured </span>
<span class="c1"># Phong shader will interpolate the texture uv coordinates for each vertex, </span>
<span class="c1"># sample from a texture image and apply the Phong lighting model</span>
<span class="n">renderer</span> <span class="o">=</span> <span class="n">MeshRenderer</span><span class="p">(</span>
<span class="n">rasterizer</span><span class="o">=</span><span class="n">MeshRasterizer</span><span class="p">(</span>
@@ -371,7 +384,8 @@ If running locally, the data is already available at the correct path.</p>
</div>
</div>
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div><div class="inner_cell">
</div>
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<p>Visualize the dataset:</p>
</div>
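The visualization cell falls outside these hunks. A plausible call using the image_grid helper fetched earlier (target_images holding the rendered views is an assumption):

# Lay the rendered RGB views out in a grid.
image_grid(target_images.cpu().numpy(), rows=4, cols=5, rgb=True)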
@@ -391,9 +405,10 @@ If running locally, the data is already available at the correct path.</p>
</div>
</div>
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div><div class="inner_cell">
</div>
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<p>Later in this tutorial, we will fit a mesh to the rendered RGB images, as well as to just images of just the cow silhouette. For the latter case, we will render a dataset of silhouette images. Most shaders in PyTorch3D will output an alpha channel along with the RGB image as a 4th channel in an RGBA image. The alpha channel encodes the probability that each pixel belongs to the foreground of the object. We contruct a soft silhouette shader to render this alpha channel.</p>
<p>Later in this tutorial, we will fit a mesh to the rendered RGB images, as well as to images of just the cow silhouette. For the latter case, we will render a dataset of silhouette images. Most shaders in PyTorch3D will output an alpha channel along with the RGB image as a 4th channel in an RGBA image. The alpha channel encodes the probability that each pixel belongs to the foreground of the object. We construct a soft silhouette shader to render this alpha channel.</p>
</div>
</div>
</div>
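A hedged sketch of the soft silhouette renderer this paragraph describes, using renderer classes imported in section 0 (the sigma, blur_radius and faces_per_pixel values mirror the tutorial's but are not shown in this diff):

import numpy as np
from pytorch3d.renderer import (
    BlendParams,
    MeshRasterizer,
    MeshRenderer,
    RasterizationSettings,
    SoftSilhouetteShader,
)

# A wide blur radius and many faces per pixel keep the alpha channel
# soft, so gradients can flow from silhouette pixels back to the mesh.
sigma = 1e-4
raster_settings_silhouette = RasterizationSettings(
    image_size=128,
    blur_radius=np.log(1.0 / 1e-4 - 1.0) * sigma,
    faces_per_pixel=50,
)
renderer_silhouette = MeshRenderer(
    rasterizer=MeshRasterizer(
        cameras=cameras, raster_settings=raster_settings_silhouette
    ),
    shader=SoftSilhouetteShader(blend_params=BlendParams(sigma=sigma)),
)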
@@ -433,7 +448,8 @@ If running locally, the data is already available at the correct path.</p>
</div>
</div>
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div><div class="inner_cell">
</div>
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<h2 id="3.-Mesh-prediction-via-silhouette-rendering">3. Mesh prediction via silhouette rendering<a class="anchor-link" href="#3.-Mesh-prediction-via-silhouette-rendering">¶</a></h2><p>In the previous section, we created a dataset of images of multiple viewpoints of a cow. In this section, we predict a mesh by observing those target images without any knowledge of the ground truth cow mesh. We assume we know the position of the cameras and lighting.</p>
<p>We first define some helper functions to visualize the results of our mesh prediction:</p>
@@ -451,7 +467,8 @@ If running locally, the data is already available at the correct path.</p>
<span class="n">target_image</span><span class="o">=</span><span class="n">target_rgb</span><span class="p">[</span><span class="mi">1</span><span class="p">],</span> <span class="n">title</span><span class="o">=</span><span class="s1">''</span><span class="p">,</span>
<span class="n">silhouette</span><span class="o">=</span><span class="kc">False</span><span class="p">):</span>
<span class="n">inds</span> <span class="o">=</span> <span class="mi">3</span> <span class="k">if</span> <span class="n">silhouette</span> <span class="k">else</span> <span class="nb">range</span><span class="p">(</span><span class="mi">3</span><span class="p">)</span>
<span class="n">predicted_images</span> <span class="o">=</span> <span class="n">renderer</span><span class="p">(</span><span class="n">predicted_mesh</span><span class="p">)</span>
<span class="k">with</span> <span class="n">torch</span><span class="o">.</span><span class="n">no_grad</span><span class="p">():</span>
<span class="n">predicted_images</span> <span class="o">=</span> <span class="n">renderer</span><span class="p">(</span><span class="n">predicted_mesh</span><span class="p">)</span>
<span class="n">plt</span><span class="o">.</span><span class="n">figure</span><span class="p">(</span><span class="n">figsize</span><span class="o">=</span><span class="p">(</span><span class="mi">20</span><span class="p">,</span> <span class="mi">10</span><span class="p">))</span>
<span class="n">plt</span><span class="o">.</span><span class="n">subplot</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="mi">2</span><span class="p">,</span> <span class="mi">1</span><span class="p">)</span>
<span class="n">plt</span><span class="o">.</span><span class="n">imshow</span><span class="p">(</span><span class="n">predicted_images</span><span class="p">[</span><span class="mi">0</span><span class="p">,</span> <span class="o">...</span><span class="p">,</span> <span class="n">inds</span><span class="p">]</span><span class="o">.</span><span class="n">cpu</span><span class="p">()</span><span class="o">.</span><span class="n">detach</span><span class="p">()</span><span class="o">.</span><span class="n">numpy</span><span class="p">())</span>
@@ -459,7 +476,6 @@ If running locally, the data is already available at the correct path.</p>
<span class="n">plt</span><span class="o">.</span><span class="n">subplot</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="mi">2</span><span class="p">,</span> <span class="mi">2</span><span class="p">)</span>
<span class="n">plt</span><span class="o">.</span><span class="n">imshow</span><span class="p">(</span><span class="n">target_image</span><span class="o">.</span><span class="n">cpu</span><span class="p">()</span><span class="o">.</span><span class="n">detach</span><span class="p">()</span><span class="o">.</span><span class="n">numpy</span><span class="p">())</span>
<span class="n">plt</span><span class="o">.</span><span class="n">title</span><span class="p">(</span><span class="n">title</span><span class="p">)</span>
<span class="n">plt</span><span class="o">.</span><span class="n">grid</span><span class="p">(</span><span class="s2">"off"</span><span class="p">)</span>
<span class="n">plt</span><span class="o">.</span><span class="n">axis</span><span class="p">(</span><span class="s2">"off"</span><span class="p">)</span>
<span class="c1"># Plot losses as a function of optimization iteration</span>
@@ -478,7 +494,8 @@ If running locally, the data is already available at the correct path.</p>
</div>
</div>
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div><div class="inner_cell">
</div>
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<p>Starting from a sphere mesh, we will learn offsets of each vertex such that the predicted mesh silhouette is more similar to the target silhouette image at each optimization step. We begin by loading our initial sphere mesh:</p>
</div>
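The cell itself is outside the hunks; it reduces to a single call (level 4 is the tutorial's subdivision level, device from the setup cell):

from pytorch3d.utils import ico_sphere

# The fixed-topology source mesh whose per-vertex offsets will be learned.
src_mesh = ico_sphere(4, device)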
@@ -497,7 +514,8 @@ If running locally, the data is already available at the correct path.</p>
</div>
</div>
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div><div class="inner_cell">
</div>
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<p>We create a new differentiable renderer for rendering the silhouette of our predicted mesh:</p>
</div>
@@ -532,7 +550,8 @@ If running locally, the data is already available at the correct path.</p>
</div>
</div>
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div><div class="inner_cell">
</div>
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<p>We initialize settings, losses, and the optimizer that will be used to iteratively fit our mesh to the target silhouettes:</p>
</div>
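A sketch of what this cell sets up, matching the loss bookkeeping visible in the loop hunk below (the weights are the tutorial's; src_mesh and device come from earlier cells):

import torch

# Per-loss weights plus running histories, appended to inside the loop.
losses = {
    "silhouette": {"weight": 1.0, "values": []},
    "edge": {"weight": 1.0, "values": []},
    "normal": {"weight": 0.01, "values": []},
    "laplacian": {"weight": 1.0, "values": []},
}

# One learnable (x, y, z) offset per vertex of the source sphere.
deform_verts = torch.full(
    src_mesh.verts_packed().shape, 0.0, device=device, requires_grad=True
)
optimizer = torch.optim.SGD([deform_verts], lr=1.0, momentum=0.9)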
@@ -585,9 +604,10 @@ If running locally, the data is already available at the correct path.</p>
</div>
</div>
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div><div class="inner_cell">
</div>
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<p>We write an optimization loop to iteratively refine our predicted mesh from the sphere mesh into a mesh that matches the sillhouettes of the target images:</p>
<p>We write an optimization loop to iteratively refine our predicted mesh from the sphere mesh into a mesh that matches the silhouettes of the target images:</p>
</div>
</div>
</div>
@@ -622,7 +642,8 @@ If running locally, the data is already available at the correct path.</p>
<span class="n">sum_loss</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">tensor</span><span class="p">(</span><span class="mf">0.0</span><span class="p">,</span> <span class="n">device</span><span class="o">=</span><span class="n">device</span><span class="p">)</span>
<span class="k">for</span> <span class="n">k</span><span class="p">,</span> <span class="n">l</span> <span class="ow">in</span> <span class="n">loss</span><span class="o">.</span><span class="n">items</span><span class="p">():</span>
<span class="n">sum_loss</span> <span class="o">+=</span> <span class="n">l</span> <span class="o">*</span> <span class="n">losses</span><span class="p">[</span><span class="n">k</span><span class="p">][</span><span class="s2">"weight"</span><span class="p">]</span>
<span class="n">losses</span><span class="p">[</span><span class="n">k</span><span class="p">][</span><span class="s2">"values"</span><span class="p">]</span><span class="o">.</span><span class="n">append</span><span class="p">(</span><span class="n">l</span><span class="p">)</span>
<span class="n">losses</span><span class="p">[</span><span class="n">k</span><span class="p">][</span><span class="s2">"values"</span><span class="p">]</span><span class="o">.</span><span class="n">append</span><span class="p">(</span><span class="nb">float</span><span class="p">(</span><span class="n">l</span><span class="o">.</span><span class="n">detach</span><span class="p">()</span><span class="o">.</span><span class="n">cpu</span><span class="p">()))</span>


<span class="c1"># Print the losses</span>
<span class="n">loop</span><span class="o">.</span><span class="n">set_description</span><span class="p">(</span><span class="s2">"total_loss = </span><span class="si">%.6f</span><span class="s2">"</span> <span class="o">%</span> <span class="n">sum_loss</span><span class="p">)</span>
@@ -654,7 +675,8 @@ If running locally, the data is already available at the correct path.</p>
</div>
</div>
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div><div class="inner_cell">
</div>
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<h2 id="3.-Mesh-and-texture-prediction-via-textured-rendering">3. Mesh and texture prediction via textured rendering<a class="anchor-link" href="#3.-Mesh-and-texture-prediction-via-textured-rendering">¶</a></h2><p>We can predict both the mesh and its texture if we add an additional loss based on the comparing a predicted rendered RGB image to the target image. As before, we start with a sphere mesh. We learn both translational offsets and RGB texture colors for each vertex in the sphere mesh. Since our loss is based on rendered RGB pixel values instead of just the silhouette, we use a <strong>SoftPhongShader</strong> instead of a <strong>SoftSilhouetteShader</strong>.</p>
</div>
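A sketch of the extra learnable state this section adds on top of the silhouette setup (names follow the tutorial where known; TexturesVertex is the class discussed in section 1):

import torch
from pytorch3d.renderer import TexturesVertex

# Learn one RGB color per vertex, starting from mid-gray, jointly
# with the vertex offsets.
verts_shape = src_mesh.verts_packed().shape
sphere_verts_rgb = torch.full(
    [1, verts_shape[0], 3], 0.5, device=device, requires_grad=True
)
optimizer = torch.optim.SGD(
    [deform_verts, sphere_verts_rgb], lr=1.0, momentum=0.9
)

# Each iteration re-attaches the colors to the deformed mesh:
# new_src_mesh.textures = TexturesVertex(verts_features=sphere_verts_rgb)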
@@ -691,7 +713,8 @@ If running locally, the data is already available at the correct path.</p>
</div>
</div>
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div><div class="inner_cell">
</div>
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<p>We initialize settings, losses, and the optimizer that will be used to iteratively fit our mesh to the target RGB images:</p>
</div>
@@ -738,7 +761,8 @@ If running locally, the data is already available at the correct path.</p>
</div>
</div>
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div><div class="inner_cell">
</div>
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<p>We write an optimization loop to iteratively refine our predicted mesh and its vertex colors from the sphere mesh into a mesh that matches the target images:</p>
</div>
@@ -787,7 +811,7 @@ If running locally, the data is already available at the correct path.</p>
<span class="n">sum_loss</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">tensor</span><span class="p">(</span><span class="mf">0.0</span><span class="p">,</span> <span class="n">device</span><span class="o">=</span><span class="n">device</span><span class="p">)</span>
<span class="k">for</span> <span class="n">k</span><span class="p">,</span> <span class="n">l</span> <span class="ow">in</span> <span class="n">loss</span><span class="o">.</span><span class="n">items</span><span class="p">():</span>
<span class="n">sum_loss</span> <span class="o">+=</span> <span class="n">l</span> <span class="o">*</span> <span class="n">losses</span><span class="p">[</span><span class="n">k</span><span class="p">][</span><span class="s2">"weight"</span><span class="p">]</span>
<span class="n">losses</span><span class="p">[</span><span class="n">k</span><span class="p">][</span><span class="s2">"values"</span><span class="p">]</span><span class="o">.</span><span class="n">append</span><span class="p">(</span><span class="n">l</span><span class="p">)</span>
<span class="n">losses</span><span class="p">[</span><span class="n">k</span><span class="p">][</span><span class="s2">"values"</span><span class="p">]</span><span class="o">.</span><span class="n">append</span><span class="p">(</span><span class="nb">float</span><span class="p">(</span><span class="n">l</span><span class="o">.</span><span class="n">detach</span><span class="p">()</span><span class="o">.</span><span class="n">cpu</span><span class="p">()))</span>
<span class="c1"># Print the losses</span>
<span class="n">loop</span><span class="o">.</span><span class="n">set_description</span><span class="p">(</span><span class="s2">"total_loss = </span><span class="si">%.6f</span><span class="s2">"</span> <span class="o">%</span> <span class="n">sum_loss</span><span class="p">)</span>
@@ -817,14 +841,16 @@ If running locally, the data is already available at the correct path.</p>
</div>
</div>
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div><div class="inner_cell">
</div>
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<p>Save the final predicted mesh:</p>
</div>
</div>
</div>
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div><div class="inner_cell">
</div>
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<h2 id="4.-Save-the-final-predicted-mesh">4. Save the final predicted mesh<a class="anchor-link" href="#4.-Save-the-final-predicted-mesh">¶</a></h2>
</div>
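The save cell is outside the hunks shown; a minimal sketch of what it does (scale and center are the normalization values from section 1, new_src_mesh the final deformed mesh from the loop):

import os
from pytorch3d.io import save_obj

# Undo the normalization applied in section 1, then write an .obj.
final_verts, final_faces = new_src_mesh.get_mesh_verts_faces(0)
final_verts = final_verts * scale + center

final_obj = os.path.join("./", "final_model.obj")
save_obj(final_obj, final_verts, final_faces)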
@@ -850,10 +876,11 @@ If running locally, the data is already available at the correct path.</p>
</div>
</div>
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div><div class="inner_cell">
</div>
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<h2 id="5.-Conclusion">5. Conclusion<a class="anchor-link" href="#5.-Conclusion">¶</a></h2><p>In this tutorial, we learned how to load a textured mesh from an obj file, create a synthetic dataset by rendering the mesh from multiple viewpoints. We showed how to set up an optimization loop to fit a mesh to the observed dataset images based on a rendered silhouette loss. We then augmented this optimization loop with an additional loss based on rendered RGB images, which allowed us to predict both a mesh and its texture.</p>
</div>
</div>
</div>
</div></div></div></div></div><footer class="nav-footer" id="footer"><section class="sitemap"><div class="footerSection"><div class="social"><a class="github-button" href="https://github.com/facebookresearch/pytorch3d" data-count-href="https://github.com/facebookresearch/pytorch3d/stargazers" data-show-count="true" data-count-aria-label="# stargazers on GitHub" aria-label="Star PyTorch3D on GitHub">pytorch3d</a></div></div></section><a href="https://opensource.facebook.com/" target="_blank" rel="noreferrer noopener" class="fbOpenSource"><img src="/img/oss_logo.png" alt="Facebook Open Source" width="170" height="45"/></a><section class="copyright">Copyright © 2020 Facebook Inc<br/>Legal:<a href="https://opensource.facebook.com/legal/privacy/" target="_blank" rel="noreferrer noopener">Privacy</a><a href="https://opensource.facebook.com/legal/terms/" target="_blank" rel="noreferrer noopener">Terms</a></section></footer></div></body></html>
</div></div></div></div></div><footer class="nav-footer" id="footer"><section class="sitemap"><div class="footerSection"><div class="social"><a class="github-button" href="https://github.com/facebookresearch/pytorch3d" data-count-href="https://github.com/facebookresearch/pytorch3d/stargazers" data-show-count="true" data-count-aria-label="# stargazers on GitHub" aria-label="Star PyTorch3D on GitHub">pytorch3d</a></div></div></section><a href="https://opensource.facebook.com/" target="_blank" rel="noreferrer noopener" class="fbOpenSource"><img src="/img/oss_logo.png" alt="Facebook Open Source" width="170" height="45"/></a><section class="copyright">Copyright © 2021 Facebook Inc<br/>Legal:<a href="https://opensource.facebook.com/legal/privacy/" target="_blank" rel="noreferrer noopener">Privacy</a><a href="https://opensource.facebook.com/legal/terms/" target="_blank" rel="noreferrer noopener">Terms</a></section></footer></div></body></html>