Mirror of https://github.com/facebookresearch/pytorch3d.git (synced 2026-02-06 22:12:16 +08:00)
Update latest version of site
@@ -6,7 +6,7 @@
ga('create', 'UA-157376881-1', 'auto');
ga('send', 'pageview');
</script><script type="text/javascript" src="https://buttons.github.io/buttons.js"></script><script src="/js/scrollSpy.js"></script><link rel="stylesheet" href="/css/main.css"/><script src="/js/codetabs.js"></script></head><body><div class="fixedHeaderContainer"><div class="headerWrapper wrapper"><header><a href="/"><img class="logo" src="/img/pytorch3dfavicon.png" alt="PyTorch3D"/><h2 class="headerTitleWithLogo">PyTorch3D</h2></a><div class="navigationWrapper navigationSlider"><nav class="slidingNav"><ul class="nav-site nav-site-internal"><li class=""><a href="/docs/why_pytorch3d" target="_self">Docs</a></li><li class=""><a href="/tutorials" target="_self">Tutorials</a></li><li class=""><a href="https://pytorch3d.readthedocs.io/" target="_self">API</a></li><li class=""><a href="https://github.com/facebookresearch/pytorch3d" target="_self">GitHub</a></li></ul></nav></div></header></div></div><div class="navPusher"><div class="docMainWrapper wrapper"><div class="container docsNavContainer" id="docsNav"><nav class="toc"><div class="toggleNav"><section class="navWrapper wrapper"><div class="navBreadcrumb wrapper"><div class="navToggle" id="navToggler"><div class="hamburger-menu"><div class="line1"></div><div class="line2"></div><div class="line3"></div></div></div><h2><i>›</i><span></span></h2><div class="tocToggler" id="tocToggler"><i class="icon-toc"></i></div></div><div class="navGroups"><div class="navGroup"><h3 class="navGroupCategoryTitle">Tutorials</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/">Overview</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">3D operators</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/deform_source_mesh_to_target_mesh">Fit Mesh</a></li><li class="navListItem navListItemActive"><a class="navItem" href="/tutorials/bundle_adjustment">Bundle Adjustment</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Rendering</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/render_textured_meshes">Render Textured Meshes</a></li><li class="navListItem"><a class="navItem" href="/tutorials/fit_textured_mesh">Fit a mesh with texture via rendering</a></li><li class="navListItem"><a class="navItem" href="/tutorials/camera_position_optimization_with_differentiable_rendering">Camera Position Optimization</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Dataloaders</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/dataloaders_ShapeNetCore_R2N2">Data loaders for ShapeNetCore and R2N2</a></li></ul></div></div></section></div><script>
</script><script type="text/javascript" src="https://buttons.github.io/buttons.js"></script><script src="/js/scrollSpy.js"></script><link rel="stylesheet" href="/css/main.css"/><script src="/js/codetabs.js"></script></head><body><div class="fixedHeaderContainer"><div class="headerWrapper wrapper"><header><a href="/"><img class="logo" src="/img/pytorch3dfavicon.png" alt="PyTorch3D"/><h2 class="headerTitleWithLogo">PyTorch3D</h2></a><div class="navigationWrapper navigationSlider"><nav class="slidingNav"><ul class="nav-site nav-site-internal"><li class=""><a href="/docs/why_pytorch3d" target="_self">Docs</a></li><li class=""><a href="/tutorials" target="_self">Tutorials</a></li><li class=""><a href="https://pytorch3d.readthedocs.io/" target="_self">API</a></li><li class=""><a href="https://github.com/facebookresearch/pytorch3d" target="_self">GitHub</a></li></ul></nav></div></header></div></div><div class="navPusher"><div class="docMainWrapper wrapper"><div class="container docsNavContainer" id="docsNav"><nav class="toc"><div class="toggleNav"><section class="navWrapper wrapper"><div class="navBreadcrumb wrapper"><div class="navToggle" id="navToggler"><div class="hamburger-menu"><div class="line1"></div><div class="line2"></div><div class="line3"></div></div></div><h2><i>›</i><span></span></h2><div class="tocToggler" id="tocToggler"><i class="icon-toc"></i></div></div><div class="navGroups"><div class="navGroup"><h3 class="navGroupCategoryTitle">Tutorials</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/">Overview</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">3D operators</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/deform_source_mesh_to_target_mesh">Fit Mesh</a></li><li class="navListItem navListItemActive"><a class="navItem" href="/tutorials/bundle_adjustment">Bundle Adjustment</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Rendering</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/render_textured_meshes">Render Textured Meshes</a></li><li class="navListItem"><a class="navItem" href="/tutorials/render_densepose">Render DensePose Meshes</a></li><li class="navListItem"><a class="navItem" href="/tutorials/render_colored_points">Render Colored Pointclouds</a></li><li class="navListItem"><a class="navItem" href="/tutorials/fit_textured_mesh">Fit a Mesh with Texture via Rendering</a></li><li class="navListItem"><a class="navItem" href="/tutorials/camera_position_optimization_with_differentiable_rendering">Camera Position Optimization with Differentiable Rendering</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Dataloaders</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/dataloaders_ShapeNetCore_R2N2">Data loaders for ShapeNetCore and R2N2</a></li></ul></div></div></section></div><script>
var coll = document.getElementsByClassName('collapsible');
var checkActiveCategory = true;
for (var i = 0; i < coll.length; i++) {
@@ -122,12 +122,22 @@ where $d(g_i, g_j)$ is a suitable metric that compares the extrinsics of cameras
 !pip install torch torchvision
 import os
 import sys
 import torch
-if torch.__version__=='1.6.0+cu101' and sys.platform.startswith('linux'):
-    !pip install pytorch3d
-else:
-    !pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable'
+need_pytorch3d=False
+try:
+    import pytorch3d
+except ModuleNotFoundError:
+    need_pytorch3d=True
+if need_pytorch3d:
+    !curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz
+    !tar xzf 1.10.0.tar.gz
+    os.environ["CUB_HOME"] = os.getcwd() + "/cub-1.10.0"
+    !pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable'
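The same installation cell is swapped into each rebuilt tutorial page. As a plain-Python sketch of what the added lines do (in the notebook the `!` lines are IPython shell magics, approximated here with `subprocess`; the helper name `ensure_pytorch3d` and the comment about why CUB is fetched are editorial assumptions, not part of the commit):

import os
import subprocess
import sys


def ensure_pytorch3d():
    # If pytorch3d already imports, there is nothing to install.
    try:
        import pytorch3d  # noqa: F401
        return
    except ModuleNotFoundError:
        pass
    # Assumption: building the CUDA extensions from source needs the NVIDIA CUB
    # headers, so fetch CUB 1.10.0 and point CUB_HOME at the unpacked tree.
    subprocess.run(
        ["curl", "-LO", "https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz"],
        check=True,
    )
    subprocess.run(["tar", "xzf", "1.10.0.tar.gz"], check=True)
    os.environ["CUB_HOME"] = os.getcwd() + "/cub-1.10.0"
    # Install the latest stable PyTorch3D straight from GitHub.
    subprocess.run(
        [sys.executable, "-m", "pip", "install",
         "git+https://github.com/facebookresearch/pytorch3d.git@stable"],
        check=True,
    )

In the actual notebook cell this body runs at top level; the function wrapper here is only for readability.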
@@ -6,7 +6,7 @@
ga('create', 'UA-157376881-1', 'auto');
ga('send', 'pageview');
</script><script type="text/javascript" src="https://buttons.github.io/buttons.js"></script><script src="/js/scrollSpy.js"></script><link rel="stylesheet" href="/css/main.css"/><script src="/js/codetabs.js"></script></head><body><div class="fixedHeaderContainer"><div class="headerWrapper wrapper"><header><a href="/"><img class="logo" src="/img/pytorch3dfavicon.png" alt="PyTorch3D"/><h2 class="headerTitleWithLogo">PyTorch3D</h2></a><div class="navigationWrapper navigationSlider"><nav class="slidingNav"><ul class="nav-site nav-site-internal"><li class=""><a href="/docs/why_pytorch3d" target="_self">Docs</a></li><li class=""><a href="/tutorials" target="_self">Tutorials</a></li><li class=""><a href="https://pytorch3d.readthedocs.io/" target="_self">API</a></li><li class=""><a href="https://github.com/facebookresearch/pytorch3d" target="_self">GitHub</a></li></ul></nav></div></header></div></div><div class="navPusher"><div class="docMainWrapper wrapper"><div class="container docsNavContainer" id="docsNav"><nav class="toc"><div class="toggleNav"><section class="navWrapper wrapper"><div class="navBreadcrumb wrapper"><div class="navToggle" id="navToggler"><div class="hamburger-menu"><div class="line1"></div><div class="line2"></div><div class="line3"></div></div></div><h2><i>›</i><span></span></h2><div class="tocToggler" id="tocToggler"><i class="icon-toc"></i></div></div><div class="navGroups"><div class="navGroup"><h3 class="navGroupCategoryTitle">Tutorials</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/">Overview</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">3D operators</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/deform_source_mesh_to_target_mesh">Fit Mesh</a></li><li class="navListItem"><a class="navItem" href="/tutorials/bundle_adjustment">Bundle Adjustment</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Rendering</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/render_textured_meshes">Render Textured Meshes</a></li><li class="navListItem"><a class="navItem" href="/tutorials/fit_textured_mesh">Fit a mesh with texture via rendering</a></li><li class="navListItem navListItemActive"><a class="navItem" href="/tutorials/camera_position_optimization_with_differentiable_rendering">Camera Position Optimization</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Dataloaders</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/dataloaders_ShapeNetCore_R2N2">Data loaders for ShapeNetCore and R2N2</a></li></ul></div></div></section></div><script>
</script><script type="text/javascript" src="https://buttons.github.io/buttons.js"></script><script src="/js/scrollSpy.js"></script><link rel="stylesheet" href="/css/main.css"/><script src="/js/codetabs.js"></script></head><body><div class="fixedHeaderContainer"><div class="headerWrapper wrapper"><header><a href="/"><img class="logo" src="/img/pytorch3dfavicon.png" alt="PyTorch3D"/><h2 class="headerTitleWithLogo">PyTorch3D</h2></a><div class="navigationWrapper navigationSlider"><nav class="slidingNav"><ul class="nav-site nav-site-internal"><li class=""><a href="/docs/why_pytorch3d" target="_self">Docs</a></li><li class=""><a href="/tutorials" target="_self">Tutorials</a></li><li class=""><a href="https://pytorch3d.readthedocs.io/" target="_self">API</a></li><li class=""><a href="https://github.com/facebookresearch/pytorch3d" target="_self">GitHub</a></li></ul></nav></div></header></div></div><div class="navPusher"><div class="docMainWrapper wrapper"><div class="container docsNavContainer" id="docsNav"><nav class="toc"><div class="toggleNav"><section class="navWrapper wrapper"><div class="navBreadcrumb wrapper"><div class="navToggle" id="navToggler"><div class="hamburger-menu"><div class="line1"></div><div class="line2"></div><div class="line3"></div></div></div><h2><i>›</i><span></span></h2><div class="tocToggler" id="tocToggler"><i class="icon-toc"></i></div></div><div class="navGroups"><div class="navGroup"><h3 class="navGroupCategoryTitle">Tutorials</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/">Overview</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">3D operators</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/deform_source_mesh_to_target_mesh">Fit Mesh</a></li><li class="navListItem"><a class="navItem" href="/tutorials/bundle_adjustment">Bundle Adjustment</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Rendering</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/render_textured_meshes">Render Textured Meshes</a></li><li class="navListItem"><a class="navItem" href="/tutorials/render_densepose">Render DensePose Meshes</a></li><li class="navListItem"><a class="navItem" href="/tutorials/render_colored_points">Render Colored Pointclouds</a></li><li class="navListItem"><a class="navItem" href="/tutorials/fit_textured_mesh">Fit a Mesh with Texture via Rendering</a></li><li class="navListItem navListItemActive"><a class="navItem" href="/tutorials/camera_position_optimization_with_differentiable_rendering">Camera Position Optimization with Differentiable Rendering</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Dataloaders</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/dataloaders_ShapeNetCore_R2N2">Data loaders for ShapeNetCore and R2N2</a></li></ul></div></div></section></div><script>
var coll = document.getElementsByClassName('collapsible');
var checkActiveCategory = true;
for (var i = 0; i < coll.length; i++) {
@@ -116,12 +116,22 @@
 !pip install torch torchvision
 import os
 import sys
 import torch
-if torch.__version__=='1.6.0+cu101' and sys.platform.startswith('linux'):
-    !pip install pytorch3d
-else:
-    !pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable'
+need_pytorch3d=False
+try:
+    import pytorch3d
+except ModuleNotFoundError:
+    need_pytorch3d=True
+if need_pytorch3d:
+    !curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz
+    !tar xzf 1.10.0.tar.gz
+    os.environ["CUB_HOME"] = os.getcwd() + "/cub-1.10.0"
+    !pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable'
@@ -347,8 +357,8 @@
         self.device = meshes.device
         self.renderer = renderer

-        # Get the silhouette of the reference RGB image by finding all the non zero values.
-        image_ref = torch.from_numpy((image_ref[..., :3].max(-1) != 0).astype(np.float32))
+        # Get the silhouette of the reference RGB image by finding all non-white pixel values.
+        image_ref = torch.from_numpy((image_ref[..., :3].max(-1) != 1).astype(np.float32))
         self.register_buffer('image_ref', image_ref)

         # Create an optimizable parameter for the x, y, z position of the camera.
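This hunk changes how the reference silhouette is built in the camera-position tutorial: the mask now keeps pixels whose brightest RGB channel is not 1.0 (non-white) instead of not 0.0 (non-black), which matters when the reference render has a white background. A small self-contained check (the toy image shape and values are illustrative only, and the assumption that the background renders as pure white in [0, 1] RGB is inferred from the new comment):

import numpy as np
import torch

# Toy H x W x 4 RGBA reference image in [0, 1] with a white background and a
# small grey "object" in the middle.
image_ref = np.ones((4, 4, 4), dtype=np.float32)
image_ref[1:3, 1:3, :3] = 0.2

# Old rule (!= 0): any pixel with a non-zero channel counts as foreground,
# so the whole white background is wrongly included in the silhouette.
old_mask = torch.from_numpy((image_ref[..., :3].max(-1) != 0).astype(np.float32))

# New rule (!= 1): only pixels that are not pure white count as foreground,
# so just the rendered object survives.
new_mask = torch.from_numpy((image_ref[..., :3].max(-1) != 1).astype(np.float32))

print(old_mask.sum().item(), new_mask.sum().item())  # 16.0 vs 4.0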
@@ -6,7 +6,7 @@
ga('create', 'UA-157376881-1', 'auto');
ga('send', 'pageview');
</script><script type="text/javascript" src="https://buttons.github.io/buttons.js"></script><script src="/js/scrollSpy.js"></script><link rel="stylesheet" href="/css/main.css"/><script src="/js/codetabs.js"></script></head><body><div class="fixedHeaderContainer"><div class="headerWrapper wrapper"><header><a href="/"><img class="logo" src="/img/pytorch3dfavicon.png" alt="PyTorch3D"/><h2 class="headerTitleWithLogo">PyTorch3D</h2></a><div class="navigationWrapper navigationSlider"><nav class="slidingNav"><ul class="nav-site nav-site-internal"><li class=""><a href="/docs/why_pytorch3d" target="_self">Docs</a></li><li class=""><a href="/tutorials" target="_self">Tutorials</a></li><li class=""><a href="https://pytorch3d.readthedocs.io/" target="_self">API</a></li><li class=""><a href="https://github.com/facebookresearch/pytorch3d" target="_self">GitHub</a></li></ul></nav></div></header></div></div><div class="navPusher"><div class="docMainWrapper wrapper"><div class="container docsNavContainer" id="docsNav"><nav class="toc"><div class="toggleNav"><section class="navWrapper wrapper"><div class="navBreadcrumb wrapper"><div class="navToggle" id="navToggler"><div class="hamburger-menu"><div class="line1"></div><div class="line2"></div><div class="line3"></div></div></div><h2><i>›</i><span></span></h2><div class="tocToggler" id="tocToggler"><i class="icon-toc"></i></div></div><div class="navGroups"><div class="navGroup"><h3 class="navGroupCategoryTitle">Tutorials</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/">Overview</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">3D operators</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/deform_source_mesh_to_target_mesh">Fit Mesh</a></li><li class="navListItem"><a class="navItem" href="/tutorials/bundle_adjustment">Bundle Adjustment</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Rendering</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/render_textured_meshes">Render Textured Meshes</a></li><li class="navListItem"><a class="navItem" href="/tutorials/fit_textured_mesh">Fit a mesh with texture via rendering</a></li><li class="navListItem"><a class="navItem" href="/tutorials/camera_position_optimization_with_differentiable_rendering">Camera Position Optimization</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Dataloaders</h3><ul class=""><li class="navListItem navListItemActive"><a class="navItem" href="/tutorials/dataloaders_ShapeNetCore_R2N2">Data loaders for ShapeNetCore and R2N2</a></li></ul></div></div></section></div><script>
</script><script type="text/javascript" src="https://buttons.github.io/buttons.js"></script><script src="/js/scrollSpy.js"></script><link rel="stylesheet" href="/css/main.css"/><script src="/js/codetabs.js"></script></head><body><div class="fixedHeaderContainer"><div class="headerWrapper wrapper"><header><a href="/"><img class="logo" src="/img/pytorch3dfavicon.png" alt="PyTorch3D"/><h2 class="headerTitleWithLogo">PyTorch3D</h2></a><div class="navigationWrapper navigationSlider"><nav class="slidingNav"><ul class="nav-site nav-site-internal"><li class=""><a href="/docs/why_pytorch3d" target="_self">Docs</a></li><li class=""><a href="/tutorials" target="_self">Tutorials</a></li><li class=""><a href="https://pytorch3d.readthedocs.io/" target="_self">API</a></li><li class=""><a href="https://github.com/facebookresearch/pytorch3d" target="_self">GitHub</a></li></ul></nav></div></header></div></div><div class="navPusher"><div class="docMainWrapper wrapper"><div class="container docsNavContainer" id="docsNav"><nav class="toc"><div class="toggleNav"><section class="navWrapper wrapper"><div class="navBreadcrumb wrapper"><div class="navToggle" id="navToggler"><div class="hamburger-menu"><div class="line1"></div><div class="line2"></div><div class="line3"></div></div></div><h2><i>›</i><span></span></h2><div class="tocToggler" id="tocToggler"><i class="icon-toc"></i></div></div><div class="navGroups"><div class="navGroup"><h3 class="navGroupCategoryTitle">Tutorials</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/">Overview</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">3D operators</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/deform_source_mesh_to_target_mesh">Fit Mesh</a></li><li class="navListItem"><a class="navItem" href="/tutorials/bundle_adjustment">Bundle Adjustment</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Rendering</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/render_textured_meshes">Render Textured Meshes</a></li><li class="navListItem"><a class="navItem" href="/tutorials/render_densepose">Render DensePose Meshes</a></li><li class="navListItem"><a class="navItem" href="/tutorials/render_colored_points">Render Colored Pointclouds</a></li><li class="navListItem"><a class="navItem" href="/tutorials/fit_textured_mesh">Fit a Mesh with Texture via Rendering</a></li><li class="navListItem"><a class="navItem" href="/tutorials/camera_position_optimization_with_differentiable_rendering">Camera Position Optimization with Differentiable Rendering</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Dataloaders</h3><ul class=""><li class="navListItem navListItemActive"><a class="navItem" href="/tutorials/dataloaders_ShapeNetCore_R2N2">Data loaders for ShapeNetCore and R2N2</a></li></ul></div></div></section></div><script>
var coll = document.getElementsByClassName('collapsible');
var checkActiveCategory = true;
for (var i = 0; i < coll.length; i++) {
@@ -115,12 +115,22 @@
 !pip install torch torchvision
 import os
 import sys
 import torch
-if torch.__version__=='1.6.0+cu101' and sys.platform.startswith('linux'):
-    !pip install pytorch3d
-else:
-    !pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable'
+need_pytorch3d=False
+try:
+    import pytorch3d
+except ModuleNotFoundError:
+    need_pytorch3d=True
+if need_pytorch3d:
+    !curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz
+    !tar xzf 1.10.0.tar.gz
+    os.environ["CUB_HOME"] = os.getcwd() + "/cub-1.10.0"
+    !pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable'
@@ -6,7 +6,7 @@
ga('create', 'UA-157376881-1', 'auto');
ga('send', 'pageview');
</script><script type="text/javascript" src="https://buttons.github.io/buttons.js"></script><script src="/js/scrollSpy.js"></script><link rel="stylesheet" href="/css/main.css"/><script src="/js/codetabs.js"></script></head><body><div class="fixedHeaderContainer"><div class="headerWrapper wrapper"><header><a href="/"><img class="logo" src="/img/pytorch3dfavicon.png" alt="PyTorch3D"/><h2 class="headerTitleWithLogo">PyTorch3D</h2></a><div class="navigationWrapper navigationSlider"><nav class="slidingNav"><ul class="nav-site nav-site-internal"><li class=""><a href="/docs/why_pytorch3d" target="_self">Docs</a></li><li class=""><a href="/tutorials" target="_self">Tutorials</a></li><li class=""><a href="https://pytorch3d.readthedocs.io/" target="_self">API</a></li><li class=""><a href="https://github.com/facebookresearch/pytorch3d" target="_self">GitHub</a></li></ul></nav></div></header></div></div><div class="navPusher"><div class="docMainWrapper wrapper"><div class="container docsNavContainer" id="docsNav"><nav class="toc"><div class="toggleNav"><section class="navWrapper wrapper"><div class="navBreadcrumb wrapper"><div class="navToggle" id="navToggler"><div class="hamburger-menu"><div class="line1"></div><div class="line2"></div><div class="line3"></div></div></div><h2><i>›</i><span></span></h2><div class="tocToggler" id="tocToggler"><i class="icon-toc"></i></div></div><div class="navGroups"><div class="navGroup"><h3 class="navGroupCategoryTitle">Tutorials</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/">Overview</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">3D operators</h3><ul class=""><li class="navListItem navListItemActive"><a class="navItem" href="/tutorials/deform_source_mesh_to_target_mesh">Fit Mesh</a></li><li class="navListItem"><a class="navItem" href="/tutorials/bundle_adjustment">Bundle Adjustment</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Rendering</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/render_textured_meshes">Render Textured Meshes</a></li><li class="navListItem"><a class="navItem" href="/tutorials/fit_textured_mesh">Fit a mesh with texture via rendering</a></li><li class="navListItem"><a class="navItem" href="/tutorials/camera_position_optimization_with_differentiable_rendering">Camera Position Optimization</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Dataloaders</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/dataloaders_ShapeNetCore_R2N2">Data loaders for ShapeNetCore and R2N2</a></li></ul></div></div></section></div><script>
</script><script type="text/javascript" src="https://buttons.github.io/buttons.js"></script><script src="/js/scrollSpy.js"></script><link rel="stylesheet" href="/css/main.css"/><script src="/js/codetabs.js"></script></head><body><div class="fixedHeaderContainer"><div class="headerWrapper wrapper"><header><a href="/"><img class="logo" src="/img/pytorch3dfavicon.png" alt="PyTorch3D"/><h2 class="headerTitleWithLogo">PyTorch3D</h2></a><div class="navigationWrapper navigationSlider"><nav class="slidingNav"><ul class="nav-site nav-site-internal"><li class=""><a href="/docs/why_pytorch3d" target="_self">Docs</a></li><li class=""><a href="/tutorials" target="_self">Tutorials</a></li><li class=""><a href="https://pytorch3d.readthedocs.io/" target="_self">API</a></li><li class=""><a href="https://github.com/facebookresearch/pytorch3d" target="_self">GitHub</a></li></ul></nav></div></header></div></div><div class="navPusher"><div class="docMainWrapper wrapper"><div class="container docsNavContainer" id="docsNav"><nav class="toc"><div class="toggleNav"><section class="navWrapper wrapper"><div class="navBreadcrumb wrapper"><div class="navToggle" id="navToggler"><div class="hamburger-menu"><div class="line1"></div><div class="line2"></div><div class="line3"></div></div></div><h2><i>›</i><span></span></h2><div class="tocToggler" id="tocToggler"><i class="icon-toc"></i></div></div><div class="navGroups"><div class="navGroup"><h3 class="navGroupCategoryTitle">Tutorials</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/">Overview</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">3D operators</h3><ul class=""><li class="navListItem navListItemActive"><a class="navItem" href="/tutorials/deform_source_mesh_to_target_mesh">Fit Mesh</a></li><li class="navListItem"><a class="navItem" href="/tutorials/bundle_adjustment">Bundle Adjustment</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Rendering</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/render_textured_meshes">Render Textured Meshes</a></li><li class="navListItem"><a class="navItem" href="/tutorials/render_densepose">Render DensePose Meshes</a></li><li class="navListItem"><a class="navItem" href="/tutorials/render_colored_points">Render Colored Pointclouds</a></li><li class="navListItem"><a class="navItem" href="/tutorials/fit_textured_mesh">Fit a Mesh with Texture via Rendering</a></li><li class="navListItem"><a class="navItem" href="/tutorials/camera_position_optimization_with_differentiable_rendering">Camera Position Optimization with Differentiable Rendering</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Dataloaders</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/dataloaders_ShapeNetCore_R2N2">Data loaders for ShapeNetCore and R2N2</a></li></ul></div></div></section></div><script>
var coll = document.getElementsByClassName('collapsible');
var checkActiveCategory = true;
for (var i = 0; i < coll.length; i++) {
@@ -134,12 +134,22 @@ the predicted mesh is closer to the target mesh at each optimization step. To ac
<div class="inner_cell">
<div class="input_area">
<div class="highlight hl-ipython3"><pre><span></span><span class="o">!</span>pip install torch torchvision
<span class="kn">import</span> <span class="nn">os</span>
<span class="kn">import</span> <span class="nn">sys</span>
<span class="kn">import</span> <span class="nn">torch</span>
<span class="k">if</span> <span class="n">torch</span><span class="o">.</span><span class="n">__version__</span><span class="o">==</span><span class="s1">'1.6.0+cu101'</span> <span class="ow">and</span> <span class="n">sys</span><span class="o">.</span><span class="n">platform</span><span class="o">.</span><span class="n">startswith</span><span class="p">(</span><span class="s1">'linux'</span><span class="p">):</span>
<span class="o">!</span>pip install pytorch3d
<span class="k">else</span><span class="p">:</span>
<span class="o">!</span>pip install <span class="s1">'git+https://github.com/facebookresearch/pytorch3d.git@stable'</span>
<span class="n">need_pytorch3d</span><span class="o">=</span><span class="kc">False</span>
<span class="k">try</span><span class="p">:</span>
<span class="kn">import</span> <span class="nn">pytorch3d</span>
<span class="k">except</span> <span class="n">ModuleNotFoundError</span><span class="p">:</span>
<span class="n">need_pytorch3d</span><span class="o">=</span><span class="kc">True</span>
<span class="k">if</span> <span class="n">need_pytorch3d</span><span class="p">:</span>
<span class="o">!</span>curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz
<span class="o">!</span>tar xzf <span class="m">1</span>.10.0.tar.gz
<span class="n">os</span><span class="o">.</span><span class="n">environ</span><span class="p">[</span><span class="s2">"CUB_HOME"</span><span class="p">]</span> <span class="o">=</span> <span class="n">os</span><span class="o">.</span><span class="n">getcwd</span><span class="p">()</span> <span class="o">+</span> <span class="s2">"/cub-1.10.0"</span>
<span class="o">!</span>pip install <span class="s1">'git+https://github.com/facebookresearch/pytorch3d.git@stable'</span>
</pre></div>
</div>
</div>
@@ -6,7 +6,7 @@
ga('create', 'UA-157376881-1', 'auto');
ga('send', 'pageview');
</script><script type="text/javascript" src="https://buttons.github.io/buttons.js"></script><script src="/js/scrollSpy.js"></script><link rel="stylesheet" href="/css/main.css"/><script src="/js/codetabs.js"></script></head><body><div class="fixedHeaderContainer"><div class="headerWrapper wrapper"><header><a href="/"><img class="logo" src="/img/pytorch3dfavicon.png" alt="PyTorch3D"/><h2 class="headerTitleWithLogo">PyTorch3D</h2></a><div class="navigationWrapper navigationSlider"><nav class="slidingNav"><ul class="nav-site nav-site-internal"><li class=""><a href="/docs/why_pytorch3d" target="_self">Docs</a></li><li class=""><a href="/tutorials" target="_self">Tutorials</a></li><li class=""><a href="https://pytorch3d.readthedocs.io/" target="_self">API</a></li><li class=""><a href="https://github.com/facebookresearch/pytorch3d" target="_self">GitHub</a></li></ul></nav></div></header></div></div><div class="navPusher"><div class="docMainWrapper wrapper"><div class="container docsNavContainer" id="docsNav"><nav class="toc"><div class="toggleNav"><section class="navWrapper wrapper"><div class="navBreadcrumb wrapper"><div class="navToggle" id="navToggler"><div class="hamburger-menu"><div class="line1"></div><div class="line2"></div><div class="line3"></div></div></div><h2><i>›</i><span></span></h2><div class="tocToggler" id="tocToggler"><i class="icon-toc"></i></div></div><div class="navGroups"><div class="navGroup"><h3 class="navGroupCategoryTitle">Tutorials</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/">Overview</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">3D operators</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/deform_source_mesh_to_target_mesh">Fit Mesh</a></li><li class="navListItem"><a class="navItem" href="/tutorials/bundle_adjustment">Bundle Adjustment</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Rendering</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/render_textured_meshes">Render Textured Meshes</a></li><li class="navListItem navListItemActive"><a class="navItem" href="/tutorials/fit_textured_mesh">Fit a mesh with texture via rendering</a></li><li class="navListItem"><a class="navItem" href="/tutorials/camera_position_optimization_with_differentiable_rendering">Camera Position Optimization</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Dataloaders</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/dataloaders_ShapeNetCore_R2N2">Data loaders for ShapeNetCore and R2N2</a></li></ul></div></div></section></div><script>
</script><script type="text/javascript" src="https://buttons.github.io/buttons.js"></script><script src="/js/scrollSpy.js"></script><link rel="stylesheet" href="/css/main.css"/><script src="/js/codetabs.js"></script></head><body><div class="fixedHeaderContainer"><div class="headerWrapper wrapper"><header><a href="/"><img class="logo" src="/img/pytorch3dfavicon.png" alt="PyTorch3D"/><h2 class="headerTitleWithLogo">PyTorch3D</h2></a><div class="navigationWrapper navigationSlider"><nav class="slidingNav"><ul class="nav-site nav-site-internal"><li class=""><a href="/docs/why_pytorch3d" target="_self">Docs</a></li><li class=""><a href="/tutorials" target="_self">Tutorials</a></li><li class=""><a href="https://pytorch3d.readthedocs.io/" target="_self">API</a></li><li class=""><a href="https://github.com/facebookresearch/pytorch3d" target="_self">GitHub</a></li></ul></nav></div></header></div></div><div class="navPusher"><div class="docMainWrapper wrapper"><div class="container docsNavContainer" id="docsNav"><nav class="toc"><div class="toggleNav"><section class="navWrapper wrapper"><div class="navBreadcrumb wrapper"><div class="navToggle" id="navToggler"><div class="hamburger-menu"><div class="line1"></div><div class="line2"></div><div class="line3"></div></div></div><h2><i>›</i><span></span></h2><div class="tocToggler" id="tocToggler"><i class="icon-toc"></i></div></div><div class="navGroups"><div class="navGroup"><h3 class="navGroupCategoryTitle">Tutorials</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/">Overview</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">3D operators</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/deform_source_mesh_to_target_mesh">Fit Mesh</a></li><li class="navListItem"><a class="navItem" href="/tutorials/bundle_adjustment">Bundle Adjustment</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Rendering</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/render_textured_meshes">Render Textured Meshes</a></li><li class="navListItem"><a class="navItem" href="/tutorials/render_densepose">Render DensePose Meshes</a></li><li class="navListItem"><a class="navItem" href="/tutorials/render_colored_points">Render Colored Pointclouds</a></li><li class="navListItem navListItemActive"><a class="navItem" href="/tutorials/fit_textured_mesh">Fit a Mesh with Texture via Rendering</a></li><li class="navListItem"><a class="navItem" href="/tutorials/camera_position_optimization_with_differentiable_rendering">Camera Position Optimization with Differentiable Rendering</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Dataloaders</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/dataloaders_ShapeNetCore_R2N2">Data loaders for ShapeNetCore and R2N2</a></li></ul></div></div></section></div><script>
var coll = document.getElementsByClassName('collapsible');
var checkActiveCategory = true;
for (var i = 0; i < coll.length; i++) {
@@ -114,12 +114,22 @@
<div class="inner_cell">
<div class="input_area">
<div class="highlight hl-ipython3"><pre><span></span><span class="o">!</span>pip install torch torchvision
<span class="kn">import</span> <span class="nn">os</span>
<span class="kn">import</span> <span class="nn">sys</span>
<span class="kn">import</span> <span class="nn">torch</span>
<span class="k">if</span> <span class="n">torch</span><span class="o">.</span><span class="n">__version__</span><span class="o">==</span><span class="s1">'1.6.0+cu101'</span> <span class="ow">and</span> <span class="n">sys</span><span class="o">.</span><span class="n">platform</span><span class="o">.</span><span class="n">startswith</span><span class="p">(</span><span class="s1">'linux'</span><span class="p">):</span>
<span class="o">!</span>pip install pytorch3d
<span class="k">else</span><span class="p">:</span>
<span class="o">!</span>pip install <span class="s1">'git+https://github.com/facebookresearch/pytorch3d.git@stable'</span>
<span class="n">need_pytorch3d</span><span class="o">=</span><span class="kc">False</span>
<span class="k">try</span><span class="p">:</span>
<span class="kn">import</span> <span class="nn">pytorch3d</span>
<span class="k">except</span> <span class="n">ModuleNotFoundError</span><span class="p">:</span>
<span class="n">need_pytorch3d</span><span class="o">=</span><span class="kc">True</span>
<span class="k">if</span> <span class="n">need_pytorch3d</span><span class="p">:</span>
<span class="o">!</span>curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz
<span class="o">!</span>tar xzf <span class="m">1</span>.10.0.tar.gz
<span class="n">os</span><span class="o">.</span><span class="n">environ</span><span class="p">[</span><span class="s2">"CUB_HOME"</span><span class="p">]</span> <span class="o">=</span> <span class="n">os</span><span class="o">.</span><span class="n">getcwd</span><span class="p">()</span> <span class="o">+</span> <span class="s2">"/cub-1.10.0"</span>
<span class="o">!</span>pip install <span class="s1">'git+https://github.com/facebookresearch/pytorch3d.git@stable'</span>
</pre></div>
</div>
</div>
@@ -6,7 +6,7 @@
ga('create', 'UA-157376881-1', 'auto');
ga('send', 'pageview');
</script><script type="text/javascript" src="https://buttons.github.io/buttons.js"></script><script src="/js/scrollSpy.js"></script><link rel="stylesheet" href="/css/main.css"/><script src="/js/codetabs.js"></script></head><body><div class="fixedHeaderContainer"><div class="headerWrapper wrapper"><header><a href="/"><img class="logo" src="/img/pytorch3dfavicon.png" alt="PyTorch3D"/><h2 class="headerTitleWithLogo">PyTorch3D</h2></a><div class="navigationWrapper navigationSlider"><nav class="slidingNav"><ul class="nav-site nav-site-internal"><li class=""><a href="/docs/why_pytorch3d" target="_self">Docs</a></li><li class=""><a href="/tutorials" target="_self">Tutorials</a></li><li class=""><a href="https://pytorch3d.readthedocs.io/" target="_self">API</a></li><li class=""><a href="https://github.com/facebookresearch/pytorch3d" target="_self">GitHub</a></li></ul></nav></div></header></div></div><div class="navPusher"><div class="docMainWrapper wrapper"><div class="container docsNavContainer" id="docsNav"><nav class="toc"><div class="toggleNav"><section class="navWrapper wrapper"><div class="navBreadcrumb wrapper"><div class="navToggle" id="navToggler"><div class="hamburger-menu"><div class="line1"></div><div class="line2"></div><div class="line3"></div></div></div><h2><i>›</i><span></span></h2><div class="tocToggler" id="tocToggler"><i class="icon-toc"></i></div></div><div class="navGroups"><div class="navGroup"><h3 class="navGroupCategoryTitle">Tutorials</h3><ul class=""><li class="navListItem navListItemActive"><a class="navItem" href="/tutorials/">Overview</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">3D operators</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/deform_source_mesh_to_target_mesh">Fit Mesh</a></li><li class="navListItem"><a class="navItem" href="/tutorials/bundle_adjustment">Bundle Adjustment</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Rendering</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/render_textured_meshes">Render Textured Meshes</a></li><li class="navListItem"><a class="navItem" href="/tutorials/fit_textured_mesh">Fit a mesh with texture via rendering</a></li><li class="navListItem"><a class="navItem" href="/tutorials/camera_position_optimization_with_differentiable_rendering">Camera Position Optimization</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Dataloaders</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/dataloaders_ShapeNetCore_R2N2">Data loaders for ShapeNetCore and R2N2</a></li></ul></div></div></section></div><script>
</script><script type="text/javascript" src="https://buttons.github.io/buttons.js"></script><script src="/js/scrollSpy.js"></script><link rel="stylesheet" href="/css/main.css"/><script src="/js/codetabs.js"></script></head><body><div class="fixedHeaderContainer"><div class="headerWrapper wrapper"><header><a href="/"><img class="logo" src="/img/pytorch3dfavicon.png" alt="PyTorch3D"/><h2 class="headerTitleWithLogo">PyTorch3D</h2></a><div class="navigationWrapper navigationSlider"><nav class="slidingNav"><ul class="nav-site nav-site-internal"><li class=""><a href="/docs/why_pytorch3d" target="_self">Docs</a></li><li class=""><a href="/tutorials" target="_self">Tutorials</a></li><li class=""><a href="https://pytorch3d.readthedocs.io/" target="_self">API</a></li><li class=""><a href="https://github.com/facebookresearch/pytorch3d" target="_self">GitHub</a></li></ul></nav></div></header></div></div><div class="navPusher"><div class="docMainWrapper wrapper"><div class="container docsNavContainer" id="docsNav"><nav class="toc"><div class="toggleNav"><section class="navWrapper wrapper"><div class="navBreadcrumb wrapper"><div class="navToggle" id="navToggler"><div class="hamburger-menu"><div class="line1"></div><div class="line2"></div><div class="line3"></div></div></div><h2><i>›</i><span></span></h2><div class="tocToggler" id="tocToggler"><i class="icon-toc"></i></div></div><div class="navGroups"><div class="navGroup"><h3 class="navGroupCategoryTitle">Tutorials</h3><ul class=""><li class="navListItem navListItemActive"><a class="navItem" href="/tutorials/">Overview</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">3D operators</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/deform_source_mesh_to_target_mesh">Fit Mesh</a></li><li class="navListItem"><a class="navItem" href="/tutorials/bundle_adjustment">Bundle Adjustment</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Rendering</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/render_textured_meshes">Render Textured Meshes</a></li><li class="navListItem"><a class="navItem" href="/tutorials/render_densepose">Render DensePose Meshes</a></li><li class="navListItem"><a class="navItem" href="/tutorials/render_colored_points">Render Colored Pointclouds</a></li><li class="navListItem"><a class="navItem" href="/tutorials/fit_textured_mesh">Fit a Mesh with Texture via Rendering</a></li><li class="navListItem"><a class="navItem" href="/tutorials/camera_position_optimization_with_differentiable_rendering">Camera Position Optimization with Differentiable Rendering</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Dataloaders</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/dataloaders_ShapeNetCore_R2N2">Data loaders for ShapeNetCore and R2N2</a></li></ul></div></div></section></div><script>
var coll = document.getElementsByClassName('collapsible');
var checkActiveCategory = true;
for (var i = 0; i < coll.length; i++) {
@@ -62,7 +62,4 @@
};
}
});
</script></nav></div><div class="container mainContainer documentContainer postContainer"><div class="wrapper"><div class="post"><header class="postHeader"><h1 class="postHeaderTitle">Welcome to the PyTorch3D Tutorials</h1></header><p>Here you can learn about the structure and applications of Pytorch3D from examples which are in the form of ipython notebooks.</p><h3> Run interactively </h3><p>At the top of each example you can find a button named <strong>"Run in Google Colab"</strong> which will open the notebook in <a href="https://colab.research.google.com/notebooks/intro.ipynb"> Google Colaboratory </a> where you can run the code directly in the browser with access to GPU support - it looks like this:</p><div class="tutorialButtonsWrapper"><div class="tutorialButtonWrapper buttonWrapper"><a class="tutorialButton button" target="_blank"><img class="colabButton" align="left" src="/img/colab_icon.png"/>Run in Google Colab</a></div></div><p> You can modify the code and experiment with varying different settings. Remember to install pytorch, torchvision, fvcore and pytorch3d in the first cell of the colab notebook by running: </p><div><span><pre><code class="hljs css language-bash">!pip install torch torchvision
!pip install <span class="hljs-string">'git+https://github.com/facebookresearch/pytorch3d.git@stable'</span>
</code></pre>
</span></div>This installs the latest stable version of PyTorch3D from github.<h3> Run locally </h3><p> There is also a button to download the notebook and source code to run it locally. </p></div></div></div></div><footer class="nav-footer" id="footer"><section class="sitemap"><div class="footerSection"><div class="social"><a class="github-button" href="https://github.com/facebookresearch/pytorch3d" data-count-href="https://github.com/facebookresearch/pytorch3d/stargazers" data-show-count="true" data-count-aria-label="# stargazers on GitHub" aria-label="Star PyTorch3D on GitHub">pytorch3d</a></div></div></section><a href="https://opensource.facebook.com/" target="_blank" rel="noreferrer noopener" class="fbOpenSource"><img src="/img/oss_logo.png" alt="Facebook Open Source" width="170" height="45"/></a><section class="copyright">Copyright © 2020 Facebook Inc<br/>Legal:<a href="https://opensource.facebook.com/legal/privacy/" target="_blank" rel="noreferrer noopener">Privacy</a><a href="https://opensource.facebook.com/legal/terms/" target="_blank" rel="noreferrer noopener">Terms</a></section></footer></div></body></html>
</script></nav></div><div class="container mainContainer documentContainer postContainer"><div class="wrapper"><div class="post"><header class="postHeader"><h1 class="postHeaderTitle">Welcome to the PyTorch3D Tutorials</h1></header><p>Here you can learn about the structure and applications of Pytorch3D from examples which are in the form of ipython notebooks.</p><h3> Run interactively </h3><p>At the top of each example you can find a button named <strong>"Run in Google Colab"</strong> which will open the notebook in <a href="https://colab.research.google.com/notebooks/intro.ipynb"> Google Colaboratory </a> where you can run the code directly in the browser with access to GPU support - it looks like this:</p><div class="tutorialButtonsWrapper"><div class="tutorialButtonWrapper buttonWrapper"><a class="tutorialButton button" target="_blank"><img class="colabButton" align="left" src="/img/colab_icon.png"/>Run in Google Colab</a></div></div><p> You can modify the code and experiment with varying different settings. Remember to install the latest stable version of PyTorch3D and its dependencies. Code to do this with pip is provided in each notebook. </p><h3> Run locally </h3><p> There is also a button to download the notebook and source code to run it locally. </p></div></div></div></div><footer class="nav-footer" id="footer"><section class="sitemap"><div class="footerSection"><div class="social"><a class="github-button" href="https://github.com/facebookresearch/pytorch3d" data-count-href="https://github.com/facebookresearch/pytorch3d/stargazers" data-show-count="true" data-count-aria-label="# stargazers on GitHub" aria-label="Star PyTorch3D on GitHub">pytorch3d</a></div></div></section><a href="https://opensource.facebook.com/" target="_blank" rel="noreferrer noopener" class="fbOpenSource"><img src="/img/oss_logo.png" alt="Facebook Open Source" width="170" height="45"/></a><section class="copyright">Copyright © 2020 Facebook Inc<br/>Legal:<a href="https://opensource.facebook.com/legal/privacy/" target="_blank" rel="noreferrer noopener">Privacy</a><a href="https://opensource.facebook.com/legal/terms/" target="_blank" rel="noreferrer noopener">Terms</a></section></footer></div></body></html>
543
tutorials/render_colored_points.html
Normal file
@@ -0,0 +1,543 @@
<!DOCTYPE html><html lang=""><head><meta charSet="utf-8"/><meta http-equiv="X-UA-Compatible" content="IE=edge"/><title>PyTorch3D · A library for deep learning with 3D data</title><meta name="viewport" content="width=device-width"/><meta name="generator" content="Docusaurus"/><meta name="description" content="A library for deep learning with 3D data"/><meta property="og:title" content="PyTorch3D · A library for deep learning with 3D data"/><meta property="og:type" content="website"/><meta property="og:url" content="https://pytorch3d.org/"/><meta property="og:description" content="A library for deep learning with 3D data"/><meta property="og:image" content="https://pytorch3d.org/img/pytorch3dlogoicon.svg"/><meta name="twitter:card" content="summary"/><meta name="twitter:image" content="https://pytorch3d.org/img/pytorch3dlogoicon.svg"/><link rel="shortcut icon" href="/img/pytorch3dfavicon.png"/><link rel="stylesheet" href="//cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/styles/default.min.css"/><script>
|
||||
(function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
|
||||
(i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
|
||||
m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
|
||||
})(window,document,'script','https://www.google-analytics.com/analytics.js','ga');
|
||||
|
||||
ga('create', 'UA-157376881-1', 'auto');
|
||||
ga('send', 'pageview');
|
||||
</script><script type="text/javascript" src="https://buttons.github.io/buttons.js"></script><script src="/js/scrollSpy.js"></script><link rel="stylesheet" href="/css/main.css"/><script src="/js/codetabs.js"></script></head><body><div class="fixedHeaderContainer"><div class="headerWrapper wrapper"><header><a href="/"><img class="logo" src="/img/pytorch3dfavicon.png" alt="PyTorch3D"/><h2 class="headerTitleWithLogo">PyTorch3D</h2></a><div class="navigationWrapper navigationSlider"><nav class="slidingNav"><ul class="nav-site nav-site-internal"><li class=""><a href="/docs/why_pytorch3d" target="_self">Docs</a></li><li class=""><a href="/tutorials" target="_self">Tutorials</a></li><li class=""><a href="https://pytorch3d.readthedocs.io/" target="_self">API</a></li><li class=""><a href="https://github.com/facebookresearch/pytorch3d" target="_self">GitHub</a></li></ul></nav></div></header></div></div><div class="navPusher"><div class="docMainWrapper wrapper"><div class="container docsNavContainer" id="docsNav"><nav class="toc"><div class="toggleNav"><section class="navWrapper wrapper"><div class="navBreadcrumb wrapper"><div class="navToggle" id="navToggler"><div class="hamburger-menu"><div class="line1"></div><div class="line2"></div><div class="line3"></div></div></div><h2><i>›</i><span></span></h2><div class="tocToggler" id="tocToggler"><i class="icon-toc"></i></div></div><div class="navGroups"><div class="navGroup"><h3 class="navGroupCategoryTitle">Tutorials</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/">Overview</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">3D operators</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/deform_source_mesh_to_target_mesh">Fit Mesh</a></li><li class="navListItem"><a class="navItem" href="/tutorials/bundle_adjustment">Bundle Adjustment</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Rendering</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/render_textured_meshes">Render Textured Meshes</a></li><li class="navListItem"><a class="navItem" href="/tutorials/render_densepose">Render DensePose Meshes</a></li><li class="navListItem navListItemActive"><a class="navItem" href="/tutorials/render_colored_points">Render Colored Pointclouds</a></li><li class="navListItem"><a class="navItem" href="/tutorials/fit_textured_mesh">Fit a Mesh with Texture via Rendering</a></li><li class="navListItem"><a class="navItem" href="/tutorials/camera_position_optimization_with_differentiable_rendering">Camera Position Optimization with Differentiable Rendering</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Dataloaders</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/dataloaders_ShapeNetCore_R2N2">Data loaders for ShapeNetCore and R2N2</a></li></ul></div></div></section></div><script>
|
||||
var coll = document.getElementsByClassName('collapsible');
|
||||
var checkActiveCategory = true;
|
||||
for (var i = 0; i < coll.length; i++) {
|
||||
var links = coll[i].nextElementSibling.getElementsByTagName('*');
|
||||
if (checkActiveCategory){
|
||||
for (var j = 0; j < links.length; j++) {
|
||||
if (links[j].classList.contains('navListItemActive')){
|
||||
coll[i].nextElementSibling.classList.toggle('hide');
|
||||
coll[i].childNodes[1].classList.toggle('rotate');
|
||||
checkActiveCategory = false;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
coll[i].addEventListener('click', function() {
|
||||
var arrow = this.childNodes[1];
|
||||
arrow.classList.toggle('rotate');
|
||||
var content = this.nextElementSibling;
|
||||
content.classList.toggle('hide');
|
||||
});
|
||||
}
|
||||
|
||||
document.addEventListener('DOMContentLoaded', function() {
|
||||
createToggler('#navToggler', '#docsNav', 'docsSliderActive');
|
||||
createToggler('#tocToggler', 'body', 'tocActive');
|
||||
|
||||
var headings = document.querySelector('.toc-headings');
|
||||
headings && headings.addEventListener('click', function(event) {
|
||||
var el = event.target;
|
||||
while(el !== headings){
|
||||
if (el.tagName === 'A') {
|
||||
document.body.classList.remove('tocActive');
|
||||
break;
|
||||
} else{
|
||||
el = el.parentNode;
|
||||
}
|
||||
}
|
||||
}, false);
|
||||
|
||||
function createToggler(togglerSelector, targetSelector, className) {
|
||||
var toggler = document.querySelector(togglerSelector);
|
||||
var target = document.querySelector(targetSelector);
|
||||
|
||||
if (!toggler) {
|
||||
return;
|
||||
}
|
||||
|
||||
toggler.onclick = function(event) {
|
||||
event.preventDefault();
|
||||
|
||||
target.classList.toggle(className);
|
||||
};
|
||||
}
|
||||
});
|
||||
</script></nav></div><div class="container mainContainer"><div class="wrapper"><div class="tutorialButtonsWrapper"><div class="tutorialButtonWrapper buttonWrapper"><a class="tutorialButton button" download="" href="https://colab.research.google.com/github/facebookresearch/pytorch3d/blob/stable/docs/tutorials/render_colored_points.ipynb" target="_blank"><img class="colabButton" align="left" src="/img/colab_icon.png"/>Run in Google Colab</a></div><div class="tutorialButtonWrapper buttonWrapper"><a class="tutorialButton button" download="" href="/files/render_colored_points.ipynb" target="_blank"><svg aria-hidden="true" focusable="false" data-prefix="fas" data-icon="file-download" class="svg-inline--fa fa-file-download fa-w-12" role="img" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 384 512"><path fill="currentColor" d="M224 136V0H24C10.7 0 0 10.7 0 24v464c0 13.3 10.7 24 24 24h336c13.3 0 24-10.7 24-24V160H248c-13.2 0-24-10.8-24-24zm76.45 211.36l-96.42 95.7c-6.65 6.61-17.39 6.61-24.04 0l-96.42-95.7C73.42 337.29 80.54 320 94.82 320H160v-80c0-8.84 7.16-16 16-16h32c8.84 0 16 7.16 16 16v80h65.18c14.28 0 21.4 17.29 11.27 27.36zM377 105L279.1 7c-4.5-4.5-10.6-7-17-7H256v128h128v-6.1c0-6.3-2.5-12.4-7-16.9z"></path></svg>Download Tutorial Jupyter Notebook</a></div><div class="tutorialButtonWrapper buttonWrapper"><a class="tutorialButton button" download="" href="/files/render_colored_points.py" target="_blank"><svg aria-hidden="true" focusable="false" data-prefix="fas" data-icon="file-download" class="svg-inline--fa fa-file-download fa-w-12" role="img" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 384 512"><path fill="currentColor" d="M224 136V0H24C10.7 0 0 10.7 0 24v464c0 13.3 10.7 24 24 24h336c13.3 0 24-10.7 24-24V160H248c-13.2 0-24-10.8-24-24zm76.45 211.36l-96.42 95.7c-6.65 6.61-17.39 6.61-24.04 0l-96.42-95.7C73.42 337.29 80.54 320 94.82 320H160v-80c0-8.84 7.16-16 16-16h32c8.84 0 16 7.16 16 16v80h65.18c14.28 0 21.4 17.29 11.27 27.36zM377 105L279.1 7c-4.5-4.5-10.6-7-17-7H256v128h128v-6.1c0-6.3-2.5-12.4-7-16.9z"></path></svg>Download Tutorial Source Code</a></div></div><div class="tutorialBody">
|
||||
<script
|
||||
src="https://cdnjs.cloudflare.com/ajax/libs/require.js/2.1.10/require.min.js">
|
||||
</script>
|
||||
<script
|
||||
src="https://cdnjs.cloudflare.com/ajax/libs/jquery/2.0.3/jquery.min.js">
|
||||
</script>
|
||||
<div class="notebook">
|
||||
<div class="cell border-box-sizing code_cell rendered">
|
||||
<div class="input">
|
||||
<div class="prompt input_prompt">In [ ]:</div>
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="c1"># Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
|
||||
</div><div class="inner_cell">
|
||||
<div class="text_cell_render border-box-sizing rendered_html">
|
||||
<h1 id="Render-a-colored-point-cloud">Render a colored point cloud<a class="anchor-link" href="#Render-a-colored-point-cloud">¶</a></h1><p>This tutorial shows how to:</p>
|
||||
<ul>
|
||||
<li>set up a renderer </li>
|
||||
<li>render the point cloud </li>
|
||||
<li>vary the rendering settings such as compositing and camera position</li>
|
||||
</ul>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
|
||||
</div><div class="inner_cell">
|
||||
<div class="text_cell_render border-box-sizing rendered_html">
|
||||
<h2 id="Import-modules">Import modules<a class="anchor-link" href="#Import-modules">¶</a></h2>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
|
||||
</div><div class="inner_cell">
|
||||
<div class="text_cell_render border-box-sizing rendered_html">
|
||||
<p>If <code>torch</code>, <code>torchvision</code> and <code>pytorch3d</code> are not installed, run the following cell:</p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing code_cell rendered">
|
||||
<div class="input">
|
||||
<div class="prompt input_prompt">In [ ]:</div>
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="o">!</span>pip install torch torchvision
|
||||
<span class="kn">import</span> <span class="nn">os</span>
|
||||
<span class="kn">import</span> <span class="nn">sys</span>
|
||||
<span class="kn">import</span> <span class="nn">torch</span>
|
||||
<span class="k">if</span> <span class="n">torch</span><span class="o">.</span><span class="n">__version__</span><span class="o">==</span><span class="s1">'1.6.0+cu101'</span> <span class="ow">and</span> <span class="n">sys</span><span class="o">.</span><span class="n">platform</span><span class="o">.</span><span class="n">startswith</span><span class="p">(</span><span class="s1">'linux'</span><span class="p">):</span>
|
||||
<span class="o">!</span>pip install pytorch3d
|
||||
<span class="k">else</span><span class="p">:</span>
|
||||
<span class="n">need_pytorch3d</span><span class="o">=</span><span class="kc">False</span>
|
||||
<span class="k">try</span><span class="p">:</span>
|
||||
<span class="kn">import</span> <span class="nn">pytorch3d</span>
|
||||
<span class="k">except</span> <span class="n">ModuleNotFoundError</span><span class="p">:</span>
|
||||
<span class="n">need_pytorch3d</span><span class="o">=</span><span class="kc">True</span>
|
||||
<span class="k">if</span> <span class="n">need_pytorch3d</span><span class="p">:</span>
|
||||
<span class="o">!</span>curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz
|
||||
<span class="o">!</span>tar xzf <span class="m">1</span>.10.0.tar.gz
|
||||
<span class="n">os</span><span class="o">.</span><span class="n">environ</span><span class="p">[</span><span class="s2">"CUB_HOME"</span><span class="p">]</span> <span class="o">=</span> <span class="n">os</span><span class="o">.</span><span class="n">getcwd</span><span class="p">()</span> <span class="o">+</span> <span class="s2">"/cub-1.10.0"</span>
|
||||
<span class="o">!</span>pip install <span class="s1">'git+https://github.com/facebookresearch/pytorch3d.git@stable'</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing code_cell rendered">
|
||||
<div class="input">
|
||||
<div class="prompt input_prompt">In [ ]:</div>
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="kn">import</span> <span class="nn">os</span>
|
||||
<span class="kn">import</span> <span class="nn">torch</span>
|
||||
<span class="kn">import</span> <span class="nn">torch.nn.functional</span> <span class="k">as</span> <span class="nn">F</span>
|
||||
<span class="kn">import</span> <span class="nn">matplotlib.pyplot</span> <span class="k">as</span> <span class="nn">plt</span>
|
||||
<span class="kn">from</span> <span class="nn">skimage.io</span> <span class="k">import</span> <span class="n">imread</span>
|
||||
|
||||
<span class="c1"># Util function for loading point clouds|</span>
|
||||
<span class="kn">import</span> <span class="nn">numpy</span> <span class="k">as</span> <span class="nn">np</span>
|
||||
|
||||
<span class="c1"># Data structures and functions for rendering</span>
|
||||
<span class="kn">from</span> <span class="nn">pytorch3d.structures</span> <span class="k">import</span> <span class="n">Pointclouds</span>
|
||||
<span class="kn">from</span> <span class="nn">pytorch3d.vis.plotly_vis</span> <span class="k">import</span> <span class="n">AxisArgs</span><span class="p">,</span> <span class="n">plot_batch_individually</span><span class="p">,</span> <span class="n">plot_scene</span>
|
||||
<span class="kn">from</span> <span class="nn">pytorch3d.renderer</span> <span class="k">import</span> <span class="p">(</span>
|
||||
<span class="n">look_at_view_transform</span><span class="p">,</span>
|
||||
<span class="n">FoVOrthographicCameras</span><span class="p">,</span>
|
||||
<span class="n">PointsRasterizationSettings</span><span class="p">,</span>
|
||||
<span class="n">PointsRenderer</span><span class="p">,</span>
|
||||
<span class="n">PulsarPointsRenderer</span><span class="p">,</span>
|
||||
<span class="n">PointsRasterizer</span><span class="p">,</span>
|
||||
<span class="n">AlphaCompositor</span><span class="p">,</span>
|
||||
<span class="n">NormWeightedCompositor</span>
|
||||
<span class="p">)</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
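<p>As an aside (not a cell in this notebook), the sketch below shows one way the imported pieces above are typically wired together into a point-cloud renderer. It assumes <code>device</code> and <code>point_cloud</code> are defined as in the later cells of this tutorial; the camera pose, image size and point radius are illustrative values only, not the tutorial's exact settings.</p>
<pre><code class="hljs css language-python"># Minimal sketch assembling the components imported above.
R, T = look_at_view_transform(dist=20, elev=10, azim=0)
cameras = FoVOrthographicCameras(device=device, R=R, T=T, znear=0.01)
raster_settings = PointsRasterizationSettings(
    image_size=512,        # square output image
    radius=0.003,          # screen-space point radius
    points_per_pixel=10,   # how many points are blended per pixel
)
renderer = PointsRenderer(
    rasterizer=PointsRasterizer(cameras=cameras, raster_settings=raster_settings),
    compositor=AlphaCompositor(),
)
images = renderer(point_cloud)  # (N, H, W, C) batch of rendered images
</code></pre>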
|
||||
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
|
||||
</div><div class="inner_cell">
|
||||
<div class="text_cell_render border-box-sizing rendered_html">
|
||||
<h3 id="Load-a-point-cloud-and-corresponding-colors">Load a point cloud and corresponding colors<a class="anchor-link" href="#Load-a-point-cloud-and-corresponding-colors">¶</a></h3><p>Load and create a <strong>Point Cloud</strong> object.</p>
|
||||
<p><strong>Pointclouds</strong> is a unique data structure provided in PyTorch3D for working with batches of point clouds of different sizes.</p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
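<p>To make the "batches of point clouds of different sizes" point concrete, here is a small illustrative sketch (not part of the tutorial); the tensors and point counts below are made up:</p>
<pre><code class="hljs css language-python">import torch
from pytorch3d.structures import Pointclouds

# Two hypothetical clouds with different numbers of points and per-point RGB features.
points_a, colors_a = torch.rand(1000, 3), torch.rand(1000, 3)
points_b, colors_b = torch.rand(500, 3), torch.rand(500, 3)

batch = Pointclouds(points=[points_a, points_b], features=[colors_a, colors_b])
print(batch.num_points_per_cloud())  # tensor([1000, 500])
</code></pre>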
|
||||
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
|
||||
</div><div class="inner_cell">
|
||||
<div class="text_cell_render border-box-sizing rendered_html">
|
||||
<p>If running this notebook using <strong>Google Colab</strong>, run the following cell to fetch the pointcloud data and save it at the path <code>data/PittsburghBridge</code>:
|
||||
If running locally, the data is already available at the correct path.</p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing code_cell rendered">
|
||||
<div class="input">
|
||||
<div class="prompt input_prompt">In [ ]:</div>
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="o">!</span>mkdir -p data/PittsburghBridge
|
||||
<span class="o">!</span>wget -P data/PittsburghBridge https://dl.fbaipublicfiles.com/pytorch3d/data/PittsburghBridge/pointcloud.npz
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing code_cell rendered">
|
||||
<div class="input">
|
||||
<div class="prompt input_prompt">In [ ]:</div>
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="c1"># Setup</span>
|
||||
<span class="k">if</span> <span class="n">torch</span><span class="o">.</span><span class="n">cuda</span><span class="o">.</span><span class="n">is_available</span><span class="p">():</span>
|
||||
<span class="n">device</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">device</span><span class="p">(</span><span class="s2">"cuda:0"</span><span class="p">)</span>
|
||||
<span class="n">torch</span><span class="o">.</span><span class="n">cuda</span><span class="o">.</span><span class="n">set_device</span><span class="p">(</span><span class="n">device</span><span class="p">)</span>
|
||||
<span class="k">else</span><span class="p">:</span>
|
||||
<span class="n">device</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">device</span><span class="p">(</span><span class="s2">"cpu"</span><span class="p">)</span>
|
||||
|
||||
<span class="c1"># Set paths</span>
|
||||
<span class="n">DATA_DIR</span> <span class="o">=</span> <span class="s2">"./data"</span>
|
||||
<span class="n">obj_filename</span> <span class="o">=</span> <span class="n">os</span><span class="o">.</span><span class="n">path</span><span class="o">.</span><span class="n">join</span><span class="p">(</span><span class="n">DATA_DIR</span><span class="p">,</span> <span class="s2">"PittsburghBridge/pointcloud.npz"</span><span class="p">)</span>
|
||||
|
||||
<span class="c1"># Load point cloud</span>
|
||||
<span class="n">pointcloud</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">load</span><span class="p">(</span><span class="n">obj_filename</span><span class="p">)</span>
|
||||
<span class="n">verts</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">Tensor</span><span class="p">(</span><span class="n">pointcloud</span><span class="p">[</span><span class="s1">'verts'</span><span class="p">])</span><span class="o">.</span><span class="n">to</span><span class="p">(</span><span class="n">device</span><span class="p">)</span>
|
||||
|
||||
<span class="n">rgb</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">Tensor</span><span class="p">(</span><span class="n">pointcloud</span><span class="p">[</span><span class="s1">'rgb'</span><span class="p">])</span><span class="o">.</span><span class="n">to</span><span class="p">(</span><span class="n">device</span><span class="p">)</span>
|
||||
|
||||
<span class="n">point_cloud</span> <span class="o">=</span> <span class="n">Pointclouds</span><span class="p">(</span><span class="n">points</span><span class="o">=</span><span class="p">[</span><span class="n">verts</span><span class="p">],</span> <span class="n">features</span><span class="o">=</span><span class="p">[</span><span class="n">rgb</span><span class="p">])</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
|
||||
</div><div class="inner_cell">
|
||||
<div class="text_cell_render border-box-sizing rendered_html">
|
||||
<h2 id="Create-a-renderer">Create a renderer<a class="anchor-link" href="#Create-a-renderer">¶</a></h2><p>A renderer in PyTorch3D is composed of a <strong>rasterizer</strong> and a <strong>shader</strong> which each have a number of subcomponents such as a <strong>camera</strong> (orthgraphic/perspective). Here we initialize some of these components and use default values for the rest.</p>
|
||||
<p>In this example we will first create a <strong>renderer</strong> which uses an <strong>orthographic camera</strong>, and applies <strong>alpha compositing</strong>. Then we learn how to vary different components using the modular API.</p>
|
||||
<p>[1] <a href="https://arxiv.org/abs/1912.08804">SynSin: End-to-end View Synthesis from a Single Image.</a> Olivia Wiles, Georgia Gkioxari, Richard Szeliski, Justin Johnson. CVPR 2020.</p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing code_cell rendered">
|
||||
<div class="input">
|
||||
<div class="prompt input_prompt">In [ ]:</div>
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="c1"># Initialize a camera.</span>
|
||||
<span class="n">R</span><span class="p">,</span> <span class="n">T</span> <span class="o">=</span> <span class="n">look_at_view_transform</span><span class="p">(</span><span class="mi">20</span><span class="p">,</span> <span class="mi">10</span><span class="p">,</span> <span class="mi">0</span><span class="p">)</span>
|
||||
<span class="n">cameras</span> <span class="o">=</span> <span class="n">FoVOrthographicCameras</span><span class="p">(</span><span class="n">device</span><span class="o">=</span><span class="n">device</span><span class="p">,</span> <span class="n">R</span><span class="o">=</span><span class="n">R</span><span class="p">,</span> <span class="n">T</span><span class="o">=</span><span class="n">T</span><span class="p">,</span> <span class="n">znear</span><span class="o">=</span><span class="mf">0.01</span><span class="p">)</span>
|
||||
|
||||
<span class="c1"># Define the settings for rasterization and shading. Here we set the output image to be of size</span>
|
||||
<span class="c1"># 512x512. As we are rendering images for visualization purposes only we will set faces_per_pixel=1</span>
|
||||
<span class="c1"># and blur_radius=0.0. Refer to raster_points.py for explanations of these parameters. </span>
|
||||
<span class="n">raster_settings</span> <span class="o">=</span> <span class="n">PointsRasterizationSettings</span><span class="p">(</span>
|
||||
<span class="n">image_size</span><span class="o">=</span><span class="mi">512</span><span class="p">,</span>
|
||||
<span class="n">radius</span> <span class="o">=</span> <span class="mf">0.003</span><span class="p">,</span>
|
||||
<span class="n">points_per_pixel</span> <span class="o">=</span> <span class="mi">10</span>
|
||||
<span class="p">)</span>
|
||||
|
||||
|
||||
<span class="c1"># Create a points renderer by compositing points using an alpha compositor (nearer points</span>
|
||||
<span class="c1"># are weighted more heavily). See [1] for an explanation.</span>
|
||||
<span class="n">rasterizer</span> <span class="o">=</span> <span class="n">PointsRasterizer</span><span class="p">(</span><span class="n">cameras</span><span class="o">=</span><span class="n">cameras</span><span class="p">,</span> <span class="n">raster_settings</span><span class="o">=</span><span class="n">raster_settings</span><span class="p">)</span>
|
||||
<span class="n">renderer</span> <span class="o">=</span> <span class="n">PointsRenderer</span><span class="p">(</span>
|
||||
<span class="n">rasterizer</span><span class="o">=</span><span class="n">rasterizer</span><span class="p">,</span>
|
||||
<span class="n">compositor</span><span class="o">=</span><span class="n">AlphaCompositor</span><span class="p">()</span>
|
||||
<span class="p">)</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing code_cell rendered">
|
||||
<div class="input">
|
||||
<div class="prompt input_prompt">In [ ]:</div>
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="n">images</span> <span class="o">=</span> <span class="n">renderer</span><span class="p">(</span><span class="n">point_cloud</span><span class="p">)</span>
|
||||
<span class="n">plt</span><span class="o">.</span><span class="n">figure</span><span class="p">(</span><span class="n">figsize</span><span class="o">=</span><span class="p">(</span><span class="mi">10</span><span class="p">,</span> <span class="mi">10</span><span class="p">))</span>
|
||||
<span class="n">plt</span><span class="o">.</span><span class="n">imshow</span><span class="p">(</span><span class="n">images</span><span class="p">[</span><span class="mi">0</span><span class="p">,</span> <span class="o">...</span><span class="p">,</span> <span class="p">:</span><span class="mi">3</span><span class="p">]</span><span class="o">.</span><span class="n">cpu</span><span class="p">()</span><span class="o">.</span><span class="n">numpy</span><span class="p">())</span>
|
||||
<span class="n">plt</span><span class="o">.</span><span class="n">grid</span><span class="p">(</span><span class="s2">"off"</span><span class="p">)</span>
|
||||
<span class="n">plt</span><span class="o">.</span><span class="n">axis</span><span class="p">(</span><span class="s2">"off"</span><span class="p">);</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
|
||||
</div><div class="inner_cell">
|
||||
<div class="text_cell_render border-box-sizing rendered_html">
|
||||
<p>We will now modify the <strong>renderer</strong> to use <strong>alpha compositing</strong> with a set background color.</p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing code_cell rendered">
|
||||
<div class="input">
|
||||
<div class="prompt input_prompt">In [ ]:</div>
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="n">renderer</span> <span class="o">=</span> <span class="n">PointsRenderer</span><span class="p">(</span>
|
||||
<span class="n">rasterizer</span><span class="o">=</span><span class="n">rasterizer</span><span class="p">,</span>
|
||||
<span class="c1"># Pass in background_color to the alpha compositor, setting the background color </span>
|
||||
<span class="c1"># to the 3 item tuple, representing rgb on a scale of 0 -> 1, in this case blue</span>
|
||||
<span class="n">compositor</span><span class="o">=</span><span class="n">AlphaCompositor</span><span class="p">(</span><span class="n">background_color</span><span class="o">=</span><span class="p">(</span><span class="mi">0</span><span class="p">,</span> <span class="mi">0</span><span class="p">,</span> <span class="mi">1</span><span class="p">))</span>
|
||||
<span class="p">)</span>
|
||||
<span class="n">images</span> <span class="o">=</span> <span class="n">renderer</span><span class="p">(</span><span class="n">point_cloud</span><span class="p">)</span>
|
||||
|
||||
<span class="n">plt</span><span class="o">.</span><span class="n">figure</span><span class="p">(</span><span class="n">figsize</span><span class="o">=</span><span class="p">(</span><span class="mi">10</span><span class="p">,</span> <span class="mi">10</span><span class="p">))</span>
|
||||
<span class="n">plt</span><span class="o">.</span><span class="n">imshow</span><span class="p">(</span><span class="n">images</span><span class="p">[</span><span class="mi">0</span><span class="p">,</span> <span class="o">...</span><span class="p">,</span> <span class="p">:</span><span class="mi">3</span><span class="p">]</span><span class="o">.</span><span class="n">cpu</span><span class="p">()</span><span class="o">.</span><span class="n">numpy</span><span class="p">())</span>
|
||||
<span class="n">plt</span><span class="o">.</span><span class="n">grid</span><span class="p">(</span><span class="s2">"off"</span><span class="p">)</span>
|
||||
<span class="n">plt</span><span class="o">.</span><span class="n">axis</span><span class="p">(</span><span class="s2">"off"</span><span class="p">);</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
|
||||
</div><div class="inner_cell">
|
||||
<div class="text_cell_render border-box-sizing rendered_html">
|
||||
<p>We will now create a <strong>renderer</strong> which uses an <strong>orthographic camera</strong> and applies <strong>weighted compositing</strong>.</p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing code_cell rendered">
|
||||
<div class="input">
|
||||
<div class="prompt input_prompt">In [ ]:</div>
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="c1"># Initialize a camera.</span>
|
||||
<span class="n">R</span><span class="p">,</span> <span class="n">T</span> <span class="o">=</span> <span class="n">look_at_view_transform</span><span class="p">(</span><span class="mi">20</span><span class="p">,</span> <span class="mi">10</span><span class="p">,</span> <span class="mi">0</span><span class="p">)</span>
|
||||
<span class="n">cameras</span> <span class="o">=</span> <span class="n">FoVOrthographicCameras</span><span class="p">(</span><span class="n">device</span><span class="o">=</span><span class="n">device</span><span class="p">,</span> <span class="n">R</span><span class="o">=</span><span class="n">R</span><span class="p">,</span> <span class="n">T</span><span class="o">=</span><span class="n">T</span><span class="p">,</span> <span class="n">znear</span><span class="o">=</span><span class="mf">0.01</span><span class="p">)</span>
|
||||
|
||||
<span class="c1"># Define the settings for rasterization and shading. Here we set the output image to be of size</span>
|
||||
<span class="c1"># 512x512. As we are rendering images for visualization purposes only we will set faces_per_pixel=1</span>
|
||||
<span class="c1"># and blur_radius=0.0. Refer to rasterize_points.py for explanations of these parameters. </span>
|
||||
<span class="n">raster_settings</span> <span class="o">=</span> <span class="n">PointsRasterizationSettings</span><span class="p">(</span>
|
||||
<span class="n">image_size</span><span class="o">=</span><span class="mi">512</span><span class="p">,</span>
|
||||
<span class="n">radius</span> <span class="o">=</span> <span class="mf">0.003</span><span class="p">,</span>
|
||||
<span class="n">points_per_pixel</span> <span class="o">=</span> <span class="mi">10</span>
|
||||
<span class="p">)</span>
|
||||
|
||||
|
||||
<span class="c1"># Create a points renderer by compositing points using an weighted compositor (3D points are</span>
|
||||
<span class="c1"># weighted according to their distance to a pixel and accumulated using a weighted sum)</span>
|
||||
<span class="n">renderer</span> <span class="o">=</span> <span class="n">PointsRenderer</span><span class="p">(</span>
|
||||
<span class="n">rasterizer</span><span class="o">=</span><span class="n">PointsRasterizer</span><span class="p">(</span><span class="n">cameras</span><span class="o">=</span><span class="n">cameras</span><span class="p">,</span> <span class="n">raster_settings</span><span class="o">=</span><span class="n">raster_settings</span><span class="p">),</span>
|
||||
<span class="n">compositor</span><span class="o">=</span><span class="n">NormWeightedCompositor</span><span class="p">()</span>
|
||||
<span class="p">)</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing code_cell rendered">
|
||||
<div class="input">
|
||||
<div class="prompt input_prompt">In [ ]:</div>
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="n">images</span> <span class="o">=</span> <span class="n">renderer</span><span class="p">(</span><span class="n">point_cloud</span><span class="p">)</span>
|
||||
<span class="n">plt</span><span class="o">.</span><span class="n">figure</span><span class="p">(</span><span class="n">figsize</span><span class="o">=</span><span class="p">(</span><span class="mi">10</span><span class="p">,</span> <span class="mi">10</span><span class="p">))</span>
|
||||
<span class="n">plt</span><span class="o">.</span><span class="n">imshow</span><span class="p">(</span><span class="n">images</span><span class="p">[</span><span class="mi">0</span><span class="p">,</span> <span class="o">...</span><span class="p">,</span> <span class="p">:</span><span class="mi">3</span><span class="p">]</span><span class="o">.</span><span class="n">cpu</span><span class="p">()</span><span class="o">.</span><span class="n">numpy</span><span class="p">())</span>
|
||||
<span class="n">plt</span><span class="o">.</span><span class="n">grid</span><span class="p">(</span><span class="s2">"off"</span><span class="p">)</span>
|
||||
<span class="n">plt</span><span class="o">.</span><span class="n">axis</span><span class="p">(</span><span class="s2">"off"</span><span class="p">);</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
|
||||
</div><div class="inner_cell">
|
||||
<div class="text_cell_render border-box-sizing rendered_html">
|
||||
<p>We will now modify the <strong>renderer</strong> to use <strong>weighted compositing</strong> with a set background color.</p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing code_cell rendered">
|
||||
<div class="input">
|
||||
<div class="prompt input_prompt">In [ ]:</div>
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="n">renderer</span> <span class="o">=</span> <span class="n">PointsRenderer</span><span class="p">(</span>
|
||||
<span class="n">rasterizer</span><span class="o">=</span><span class="n">PointsRasterizer</span><span class="p">(</span><span class="n">cameras</span><span class="o">=</span><span class="n">cameras</span><span class="p">,</span> <span class="n">raster_settings</span><span class="o">=</span><span class="n">raster_settings</span><span class="p">),</span>
|
||||
<span class="c1"># Pass in background_color to the norm weighted compositor, setting the background color </span>
|
||||
<span class="c1"># to the 3 item tuple, representing rgb on a scale of 0 -> 1, in this case red</span>
|
||||
<span class="n">compositor</span><span class="o">=</span><span class="n">NormWeightedCompositor</span><span class="p">(</span><span class="n">background_color</span><span class="o">=</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span><span class="mi">0</span><span class="p">,</span><span class="mi">0</span><span class="p">))</span>
|
||||
<span class="p">)</span>
|
||||
<span class="n">images</span> <span class="o">=</span> <span class="n">renderer</span><span class="p">(</span><span class="n">point_cloud</span><span class="p">)</span>
|
||||
<span class="n">plt</span><span class="o">.</span><span class="n">figure</span><span class="p">(</span><span class="n">figsize</span><span class="o">=</span><span class="p">(</span><span class="mi">10</span><span class="p">,</span> <span class="mi">10</span><span class="p">))</span>
|
||||
<span class="n">plt</span><span class="o">.</span><span class="n">imshow</span><span class="p">(</span><span class="n">images</span><span class="p">[</span><span class="mi">0</span><span class="p">,</span> <span class="o">...</span><span class="p">,</span> <span class="p">:</span><span class="mi">3</span><span class="p">]</span><span class="o">.</span><span class="n">cpu</span><span class="p">()</span><span class="o">.</span><span class="n">numpy</span><span class="p">())</span>
|
||||
<span class="n">plt</span><span class="o">.</span><span class="n">grid</span><span class="p">(</span><span class="s2">"off"</span><span class="p">)</span>
|
||||
<span class="n">plt</span><span class="o">.</span><span class="n">axis</span><span class="p">(</span><span class="s2">"off"</span><span class="p">);</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
|
||||
</div><div class="inner_cell">
|
||||
<div class="text_cell_render border-box-sizing rendered_html">
|
||||
<h2 id="Using-the-pulsar-backend">Using the pulsar backend<a class="anchor-link" href="#Using-the-pulsar-backend">¶</a></h2><p>Switching to the pulsar backend is easy! The pulsar backend has a compositor built-in, so the <code>compositor</code> argument is not required when creating it (a warning will be displayed if you provide it nevertheless). It pre-allocates memory on the rendering device, that's why it needs the <code>n_channels</code> at construction time.</p>
|
||||
<p>All parameters of the renderer forward function except the background color are batch-wise (in this example, <code>gamma</code>), so you have to provide as many values as there are examples in your batch. The background color is optional and by default set to all zeros. You can find a detailed explanation of how <code>gamma</code> influences the rendering function in the paper <a href="https://arxiv.org/pdf/2004.07484.pdf">Fast Differentiable Raycasting for Neural Rendering using
|
||||
Sphere-based Representations</a>.</p>
|
||||
<p>You can also use the <code>native</code> backend for the pulsar backend which already provides access to point opacity. The native backend can be imported from <code>pytorch3d.renderer.points.pulsar</code>; you can find examples for this in the folder <code>docs/examples</code>.</p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing code_cell rendered">
|
||||
<div class="input">
|
||||
<div class="prompt input_prompt">In [ ]:</div>
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="n">renderer</span> <span class="o">=</span> <span class="n">PulsarPointsRenderer</span><span class="p">(</span>
|
||||
<span class="n">rasterizer</span><span class="o">=</span><span class="n">PointsRasterizer</span><span class="p">(</span><span class="n">cameras</span><span class="o">=</span><span class="n">cameras</span><span class="p">,</span> <span class="n">raster_settings</span><span class="o">=</span><span class="n">raster_settings</span><span class="p">),</span>
|
||||
<span class="n">n_channels</span><span class="o">=</span><span class="mi">4</span>
|
||||
<span class="p">)</span><span class="o">.</span><span class="n">to</span><span class="p">(</span><span class="n">device</span><span class="p">)</span>
|
||||
|
||||
<span class="n">images</span> <span class="o">=</span> <span class="n">renderer</span><span class="p">(</span><span class="n">point_cloud</span><span class="p">,</span> <span class="n">gamma</span><span class="o">=</span><span class="p">(</span><span class="mf">1e-4</span><span class="p">,),</span>
|
||||
<span class="n">bg_col</span><span class="o">=</span><span class="n">torch</span><span class="o">.</span><span class="n">tensor</span><span class="p">([</span><span class="mf">0.0</span><span class="p">,</span> <span class="mf">1.0</span><span class="p">,</span> <span class="mf">0.0</span><span class="p">,</span> <span class="mf">1.0</span><span class="p">],</span> <span class="n">dtype</span><span class="o">=</span><span class="n">torch</span><span class="o">.</span><span class="n">float32</span><span class="p">,</span> <span class="n">device</span><span class="o">=</span><span class="n">device</span><span class="p">))</span>
|
||||
<span class="n">plt</span><span class="o">.</span><span class="n">figure</span><span class="p">(</span><span class="n">figsize</span><span class="o">=</span><span class="p">(</span><span class="mi">10</span><span class="p">,</span> <span class="mi">10</span><span class="p">))</span>
|
||||
<span class="n">plt</span><span class="o">.</span><span class="n">imshow</span><span class="p">(</span><span class="n">images</span><span class="p">[</span><span class="mi">0</span><span class="p">,</span> <span class="o">...</span><span class="p">,</span> <span class="p">:</span><span class="mi">3</span><span class="p">]</span><span class="o">.</span><span class="n">cpu</span><span class="p">()</span><span class="o">.</span><span class="n">numpy</span><span class="p">())</span>
|
||||
<span class="n">plt</span><span class="o">.</span><span class="n">grid</span><span class="p">(</span><span class="s2">"off"</span><span class="p">)</span>
|
||||
<span class="n">plt</span><span class="o">.</span><span class="n">axis</span><span class="p">(</span><span class="s2">"off"</span><span class="p">);</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
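<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div><div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<p>As a small clarifying sketch, assuming the <code>renderer</code>, <code>point_cloud</code> and <code>device</code> defined in the cells above, the hypothetical cell below illustrates the batch-wise behaviour just described: <code>gamma</code> needs one value per point cloud in the batch, while <code>bg_col</code> is a single optional tensor with one entry per channel.</p>
</div>
</div>
</div>
<div class="cell border-box-sizing code_cell rendered">
<div class="input">
<div class="prompt input_prompt">In [ ]:</div>
<div class="inner_cell">
<div class="input_area">
<div class="highlight hl-ipython3"><pre># A minimal sketch, reusing the pulsar renderer and point cloud from above.
# gamma is batch-wise: its length must match the number of clouds in the batch.
images = renderer(point_cloud, gamma=(1e-4,))   # batch of 1, so one value

# bg_col is not batch-wise: a single tensor with one value per channel
# (4 channels here, since the renderer was built with n_channels=4).
images = renderer(point_cloud, gamma=(1e-4,),
                  bg_col=torch.tensor([1.0, 1.0, 1.0, 1.0], device=device))

# A hypothetical batch of two clouds (with a matching camera batch in the
# rasterizer) would need two gamma values, e.g. gamma=(1e-4, 1e-4).
</pre></div>
</div>
</div>
</div>
</div>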
|
||||
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
|
||||
</div><div class="inner_cell">
|
||||
<div class="text_cell_render border-box-sizing rendered_html">
|
||||
<h3 id="View-pointclouds-in-Plotly-figures">View pointclouds in Plotly figures<a class="anchor-link" href="#View-pointclouds-in-Plotly-figures">¶</a></h3><p>Here we use the PyTorch3D function <code>plot_scene</code> to render the pointcloud in a Plotly figure. <code>plot_scene</code> returns a plotly figure with trace and subplots defined by the input.</p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing code_cell rendered">
|
||||
<div class="input">
|
||||
<div class="prompt input_prompt">In [ ]:</div>
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="n">plot_scene</span><span class="p">({</span>
|
||||
<span class="s2">"Pointcloud"</span><span class="p">:</span> <span class="p">{</span>
|
||||
<span class="s2">"person"</span><span class="p">:</span> <span class="n">point_cloud</span>
|
||||
<span class="p">}</span>
|
||||
<span class="p">})</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
|
||||
</div><div class="inner_cell">
|
||||
<div class="text_cell_render border-box-sizing rendered_html">
|
||||
<p>We will now render a batch of pointclouds. The first pointcloud is the same as above, and the second is all-black and offset by 2 in all dimensions so we can see them on the same plot.</p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing code_cell rendered">
|
||||
<div class="input">
|
||||
<div class="prompt input_prompt">In [ ]:</div>
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="n">point_cloud_batch</span> <span class="o">=</span> <span class="n">Pointclouds</span><span class="p">(</span><span class="n">points</span><span class="o">=</span><span class="p">[</span><span class="n">verts</span><span class="p">,</span> <span class="n">verts</span> <span class="o">+</span> <span class="mi">2</span><span class="p">],</span> <span class="n">features</span><span class="o">=</span><span class="p">[</span><span class="n">rgb</span><span class="p">,</span> <span class="n">torch</span><span class="o">.</span><span class="n">zeros_like</span><span class="p">(</span><span class="n">rgb</span><span class="p">)])</span>
|
||||
<span class="c1"># render both in the same plot in different traces</span>
|
||||
<span class="n">fig</span> <span class="o">=</span> <span class="n">plot_scene</span><span class="p">({</span>
|
||||
<span class="s2">"Pointcloud"</span><span class="p">:</span> <span class="p">{</span>
|
||||
<span class="s2">"person"</span><span class="p">:</span> <span class="n">point_cloud_batch</span><span class="p">[</span><span class="mi">0</span><span class="p">],</span>
|
||||
<span class="s2">"person2"</span><span class="p">:</span> <span class="n">point_cloud_batch</span><span class="p">[</span><span class="mi">1</span><span class="p">]</span>
|
||||
<span class="p">}</span>
|
||||
<span class="p">})</span>
|
||||
<span class="n">fig</span><span class="o">.</span><span class="n">show</span><span class="p">()</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing code_cell rendered">
|
||||
<div class="input">
|
||||
<div class="prompt input_prompt">In [ ]:</div>
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="c1"># render both in the same plot in one trace</span>
|
||||
<span class="n">fig</span> <span class="o">=</span> <span class="n">plot_scene</span><span class="p">({</span>
|
||||
<span class="s2">"Pointcloud"</span><span class="p">:</span> <span class="p">{</span>
|
||||
<span class="s2">"2 people"</span><span class="p">:</span> <span class="n">point_cloud_batch</span>
|
||||
<span class="p">}</span>
|
||||
<span class="p">})</span>
|
||||
<span class="n">fig</span><span class="o">.</span><span class="n">show</span><span class="p">()</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
|
||||
</div><div class="inner_cell">
|
||||
<div class="text_cell_render border-box-sizing rendered_html">
|
||||
<p>For batches, we can also use <code>plot_batch_individually</code> to avoid constructing the scene dictionary ourselves.</p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing code_cell rendered">
|
||||
<div class="input">
|
||||
<div class="prompt input_prompt">In [ ]:</div>
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="c1"># render both in 1 row in different subplots</span>
|
||||
<span class="n">fig2</span> <span class="o">=</span> <span class="n">plot_batch_individually</span><span class="p">(</span><span class="n">point_cloud_batch</span><span class="p">,</span> <span class="n">ncols</span><span class="o">=</span><span class="mi">2</span><span class="p">)</span>
|
||||
<span class="n">fig2</span><span class="o">.</span><span class="n">show</span><span class="p">()</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing code_cell rendered">
|
||||
<div class="input">
|
||||
<div class="prompt input_prompt">In [ ]:</div>
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="c1"># modify the plotly figure height and width</span>
|
||||
<span class="n">fig2</span><span class="o">.</span><span class="n">update_layout</span><span class="p">(</span><span class="n">height</span><span class="o">=</span><span class="mi">500</span><span class="p">,</span> <span class="n">width</span><span class="o">=</span><span class="mi">500</span><span class="p">)</span>
|
||||
<span class="n">fig2</span><span class="o">.</span><span class="n">show</span><span class="p">()</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
|
||||
</div><div class="inner_cell">
|
||||
<div class="text_cell_render border-box-sizing rendered_html">
|
||||
<p>We can also modify the axis arguments and axis backgrounds for either function, and title our plots in <code>plot_batch_individually</code>.</p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing code_cell rendered">
|
||||
<div class="input">
|
||||
<div class="prompt input_prompt">In [ ]:</div>
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="n">fig3</span> <span class="o">=</span> <span class="n">plot_batch_individually</span><span class="p">(</span>
|
||||
<span class="n">point_cloud_batch</span><span class="p">,</span>
|
||||
<span class="n">xaxis</span><span class="o">=</span><span class="p">{</span><span class="s2">"backgroundcolor"</span><span class="p">:</span><span class="s2">"rgb(200, 200, 230)"</span><span class="p">},</span>
|
||||
<span class="n">yaxis</span><span class="o">=</span><span class="p">{</span><span class="s2">"backgroundcolor"</span><span class="p">:</span><span class="s2">"rgb(230, 200, 200)"</span><span class="p">},</span>
|
||||
<span class="n">zaxis</span><span class="o">=</span><span class="p">{</span><span class="s2">"backgroundcolor"</span><span class="p">:</span><span class="s2">"rgb(200, 230, 200)"</span><span class="p">},</span>
|
||||
<span class="n">subplot_titles</span><span class="o">=</span><span class="p">[</span><span class="s2">"Pointcloud1"</span><span class="p">,</span> <span class="s2">"Pointcloud2"</span><span class="p">],</span> <span class="c1"># this should have a title for each subplot, titles can be ""</span>
|
||||
<span class="n">axis_args</span><span class="o">=</span><span class="n">AxisArgs</span><span class="p">(</span><span class="n">showgrid</span><span class="o">=</span><span class="kc">True</span><span class="p">))</span>
|
||||
<span class="n">fig3</span><span class="o">.</span><span class="n">show</span><span class="p">()</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div></div></div></div></div><footer class="nav-footer" id="footer"><section class="sitemap"><div class="footerSection"><div class="social"><a class="github-button" href="https://github.com/facebookresearch/pytorch3d" data-count-href="https://github.com/facebookresearch/pytorch3d/stargazers" data-show-count="true" data-count-aria-label="# stargazers on GitHub" aria-label="Star PyTorch3D on GitHub">pytorch3d</a></div></div></section><a href="https://opensource.facebook.com/" target="_blank" rel="noreferrer noopener" class="fbOpenSource"><img src="/img/oss_logo.png" alt="Facebook Open Source" width="170" height="45"/></a><section class="copyright">Copyright © 2020 Facebook Inc<br/>Legal:<a href="https://opensource.facebook.com/legal/privacy/" target="_blank" rel="noreferrer noopener">Privacy</a><a href="https://opensource.facebook.com/legal/terms/" target="_blank" rel="noreferrer noopener">Terms</a></section></footer></div></body></html>
|
||||
543
tutorials/render_colored_points/index.html
Normal file
@@ -0,0 +1,543 @@
|
||||
<!DOCTYPE html><html lang=""><head><meta charSet="utf-8"/><meta http-equiv="X-UA-Compatible" content="IE=edge"/><title>PyTorch3D · A library for deep learning with 3D data</title><meta name="viewport" content="width=device-width"/><meta name="generator" content="Docusaurus"/><meta name="description" content="A library for deep learning with 3D data"/><meta property="og:title" content="PyTorch3D · A library for deep learning with 3D data"/><meta property="og:type" content="website"/><meta property="og:url" content="https://pytorch3d.org/"/><meta property="og:description" content="A library for deep learning with 3D data"/><meta property="og:image" content="https://pytorch3d.org/img/pytorch3dlogoicon.svg"/><meta name="twitter:card" content="summary"/><meta name="twitter:image" content="https://pytorch3d.org/img/pytorch3dlogoicon.svg"/><link rel="shortcut icon" href="/img/pytorch3dfavicon.png"/><link rel="stylesheet" href="//cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/styles/default.min.css"/><script>
|
||||
(function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
|
||||
(i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
|
||||
m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
|
||||
})(window,document,'script','https://www.google-analytics.com/analytics.js','ga');
|
||||
|
||||
ga('create', 'UA-157376881-1', 'auto');
|
||||
ga('send', 'pageview');
|
||||
</script><script type="text/javascript" src="https://buttons.github.io/buttons.js"></script><script src="/js/scrollSpy.js"></script><link rel="stylesheet" href="/css/main.css"/><script src="/js/codetabs.js"></script></head><body><div class="fixedHeaderContainer"><div class="headerWrapper wrapper"><header><a href="/"><img class="logo" src="/img/pytorch3dfavicon.png" alt="PyTorch3D"/><h2 class="headerTitleWithLogo">PyTorch3D</h2></a><div class="navigationWrapper navigationSlider"><nav class="slidingNav"><ul class="nav-site nav-site-internal"><li class=""><a href="/docs/why_pytorch3d" target="_self">Docs</a></li><li class=""><a href="/tutorials" target="_self">Tutorials</a></li><li class=""><a href="https://pytorch3d.readthedocs.io/" target="_self">API</a></li><li class=""><a href="https://github.com/facebookresearch/pytorch3d" target="_self">GitHub</a></li></ul></nav></div></header></div></div><div class="navPusher"><div class="docMainWrapper wrapper"><div class="container docsNavContainer" id="docsNav"><nav class="toc"><div class="toggleNav"><section class="navWrapper wrapper"><div class="navBreadcrumb wrapper"><div class="navToggle" id="navToggler"><div class="hamburger-menu"><div class="line1"></div><div class="line2"></div><div class="line3"></div></div></div><h2><i>›</i><span></span></h2><div class="tocToggler" id="tocToggler"><i class="icon-toc"></i></div></div><div class="navGroups"><div class="navGroup"><h3 class="navGroupCategoryTitle">Tutorials</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/">Overview</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">3D operators</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/deform_source_mesh_to_target_mesh">Fit Mesh</a></li><li class="navListItem"><a class="navItem" href="/tutorials/bundle_adjustment">Bundle Adjustment</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Rendering</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/render_textured_meshes">Render Textured Meshes</a></li><li class="navListItem"><a class="navItem" href="/tutorials/render_densepose">Render DensePose Meshes</a></li><li class="navListItem navListItemActive"><a class="navItem" href="/tutorials/render_colored_points">Render Colored Pointclouds</a></li><li class="navListItem"><a class="navItem" href="/tutorials/fit_textured_mesh">Fit a Mesh with Texture via Rendering</a></li><li class="navListItem"><a class="navItem" href="/tutorials/camera_position_optimization_with_differentiable_rendering">Camera Position Optimization with Differentiable Rendering</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Dataloaders</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/dataloaders_ShapeNetCore_R2N2">Data loaders for ShapeNetCore and R2N2</a></li></ul></div></div></section></div><script>
|
||||
var coll = document.getElementsByClassName('collapsible');
|
||||
var checkActiveCategory = true;
|
||||
for (var i = 0; i < coll.length; i++) {
|
||||
var links = coll[i].nextElementSibling.getElementsByTagName('*');
|
||||
if (checkActiveCategory){
|
||||
for (var j = 0; j < links.length; j++) {
|
||||
if (links[j].classList.contains('navListItemActive')){
|
||||
coll[i].nextElementSibling.classList.toggle('hide');
|
||||
coll[i].childNodes[1].classList.toggle('rotate');
|
||||
checkActiveCategory = false;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
coll[i].addEventListener('click', function() {
|
||||
var arrow = this.childNodes[1];
|
||||
arrow.classList.toggle('rotate');
|
||||
var content = this.nextElementSibling;
|
||||
content.classList.toggle('hide');
|
||||
});
|
||||
}
|
||||
|
||||
document.addEventListener('DOMContentLoaded', function() {
|
||||
createToggler('#navToggler', '#docsNav', 'docsSliderActive');
|
||||
createToggler('#tocToggler', 'body', 'tocActive');
|
||||
|
||||
var headings = document.querySelector('.toc-headings');
|
||||
headings && headings.addEventListener('click', function(event) {
|
||||
var el = event.target;
|
||||
while(el !== headings){
|
||||
if (el.tagName === 'A') {
|
||||
document.body.classList.remove('tocActive');
|
||||
break;
|
||||
} else{
|
||||
el = el.parentNode;
|
||||
}
|
||||
}
|
||||
}, false);
|
||||
|
||||
function createToggler(togglerSelector, targetSelector, className) {
|
||||
var toggler = document.querySelector(togglerSelector);
|
||||
var target = document.querySelector(targetSelector);
|
||||
|
||||
if (!toggler) {
|
||||
return;
|
||||
}
|
||||
|
||||
toggler.onclick = function(event) {
|
||||
event.preventDefault();
|
||||
|
||||
target.classList.toggle(className);
|
||||
};
|
||||
}
|
||||
});
|
||||
</script></nav></div><div class="container mainContainer"><div class="wrapper"><div class="tutorialButtonsWrapper"><div class="tutorialButtonWrapper buttonWrapper"><a class="tutorialButton button" download="" href="https://colab.research.google.com/github/facebookresearch/pytorch3d/blob/stable/docs/tutorials/render_colored_points.ipynb" target="_blank"><img class="colabButton" align="left" src="/img/colab_icon.png"/>Run in Google Colab</a></div><div class="tutorialButtonWrapper buttonWrapper"><a class="tutorialButton button" download="" href="/files/render_colored_points.ipynb" target="_blank"><svg aria-hidden="true" focusable="false" data-prefix="fas" data-icon="file-download" class="svg-inline--fa fa-file-download fa-w-12" role="img" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 384 512"><path fill="currentColor" d="M224 136V0H24C10.7 0 0 10.7 0 24v464c0 13.3 10.7 24 24 24h336c13.3 0 24-10.7 24-24V160H248c-13.2 0-24-10.8-24-24zm76.45 211.36l-96.42 95.7c-6.65 6.61-17.39 6.61-24.04 0l-96.42-95.7C73.42 337.29 80.54 320 94.82 320H160v-80c0-8.84 7.16-16 16-16h32c8.84 0 16 7.16 16 16v80h65.18c14.28 0 21.4 17.29 11.27 27.36zM377 105L279.1 7c-4.5-4.5-10.6-7-17-7H256v128h128v-6.1c0-6.3-2.5-12.4-7-16.9z"></path></svg>Download Tutorial Jupyter Notebook</a></div><div class="tutorialButtonWrapper buttonWrapper"><a class="tutorialButton button" download="" href="/files/render_colored_points.py" target="_blank"><svg aria-hidden="true" focusable="false" data-prefix="fas" data-icon="file-download" class="svg-inline--fa fa-file-download fa-w-12" role="img" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 384 512"><path fill="currentColor" d="M224 136V0H24C10.7 0 0 10.7 0 24v464c0 13.3 10.7 24 24 24h336c13.3 0 24-10.7 24-24V160H248c-13.2 0-24-10.8-24-24zm76.45 211.36l-96.42 95.7c-6.65 6.61-17.39 6.61-24.04 0l-96.42-95.7C73.42 337.29 80.54 320 94.82 320H160v-80c0-8.84 7.16-16 16-16h32c8.84 0 16 7.16 16 16v80h65.18c14.28 0 21.4 17.29 11.27 27.36zM377 105L279.1 7c-4.5-4.5-10.6-7-17-7H256v128h128v-6.1c0-6.3-2.5-12.4-7-16.9z"></path></svg>Download Tutorial Source Code</a></div></div><div class="tutorialBody">
|
||||
<script
|
||||
src="https://cdnjs.cloudflare.com/ajax/libs/require.js/2.1.10/require.min.js">
|
||||
</script>
|
||||
<script
|
||||
src="https://cdnjs.cloudflare.com/ajax/libs/jquery/2.0.3/jquery.min.js">
|
||||
</script>
|
||||
<div class="notebook">
|
||||
<div class="cell border-box-sizing code_cell rendered">
|
||||
<div class="input">
|
||||
<div class="prompt input_prompt">In [ ]:</div>
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="c1"># Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
|
||||
</div><div class="inner_cell">
|
||||
<div class="text_cell_render border-box-sizing rendered_html">
|
||||
<h1 id="Render-a-colored-point-cloud">Render a colored point cloud<a class="anchor-link" href="#Render-a-colored-point-cloud">¶</a></h1><p>This tutorial shows how to:</p>
|
||||
<ul>
|
||||
<li>set up a renderer </li>
|
||||
<li>render the point cloud </li>
|
||||
<li>vary the rendering settings such as compositing and camera position</li>
|
||||
</ul>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
|
||||
</div><div class="inner_cell">
|
||||
<div class="text_cell_render border-box-sizing rendered_html">
|
||||
<h2 id="Import-modules">Import modules<a class="anchor-link" href="#Import-modules">¶</a></h2>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
|
||||
</div><div class="inner_cell">
|
||||
<div class="text_cell_render border-box-sizing rendered_html">
|
||||
<p>If <code>torch</code>, <code>torchvision</code> and <code>pytorch3d</code> are not installed, run the following cell:</p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing code_cell rendered">
|
||||
<div class="input">
|
||||
<div class="prompt input_prompt">In [ ]:</div>
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="o">!</span>pip install torch torchvision
|
||||
<span class="kn">import</span> <span class="nn">os</span>
|
||||
<span class="kn">import</span> <span class="nn">sys</span>
|
||||
<span class="kn">import</span> <span class="nn">torch</span>
|
||||
<span class="k">if</span> <span class="n">torch</span><span class="o">.</span><span class="n">__version__</span><span class="o">==</span><span class="s1">'1.6.0+cu101'</span> <span class="ow">and</span> <span class="n">sys</span><span class="o">.</span><span class="n">platform</span><span class="o">.</span><span class="n">startswith</span><span class="p">(</span><span class="s1">'linux'</span><span class="p">):</span>
|
||||
<span class="o">!</span>pip install pytorch3d
|
||||
<span class="k">else</span><span class="p">:</span>
|
||||
<span class="n">need_pytorch3d</span><span class="o">=</span><span class="kc">False</span>
|
||||
<span class="k">try</span><span class="p">:</span>
|
||||
<span class="kn">import</span> <span class="nn">pytorch3d</span>
|
||||
<span class="k">except</span> <span class="n">ModuleNotFoundError</span><span class="p">:</span>
|
||||
<span class="n">need_pytorch3d</span><span class="o">=</span><span class="kc">True</span>
|
||||
<span class="k">if</span> <span class="n">need_pytorch3d</span><span class="p">:</span>
|
||||
<span class="o">!</span>curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz
|
||||
<span class="o">!</span>tar xzf <span class="m">1</span>.10.0.tar.gz
|
||||
<span class="n">os</span><span class="o">.</span><span class="n">environ</span><span class="p">[</span><span class="s2">"CUB_HOME"</span><span class="p">]</span> <span class="o">=</span> <span class="n">os</span><span class="o">.</span><span class="n">getcwd</span><span class="p">()</span> <span class="o">+</span> <span class="s2">"/cub-1.10.0"</span>
|
||||
<span class="o">!</span>pip install <span class="s1">'git+https://github.com/facebookresearch/pytorch3d.git@stable'</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing code_cell rendered">
|
||||
<div class="input">
|
||||
<div class="prompt input_prompt">In [ ]:</div>
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="kn">import</span> <span class="nn">os</span>
|
||||
<span class="kn">import</span> <span class="nn">torch</span>
|
||||
<span class="kn">import</span> <span class="nn">torch.nn.functional</span> <span class="k">as</span> <span class="nn">F</span>
|
||||
<span class="kn">import</span> <span class="nn">matplotlib.pyplot</span> <span class="k">as</span> <span class="nn">plt</span>
|
||||
<span class="kn">from</span> <span class="nn">skimage.io</span> <span class="k">import</span> <span class="n">imread</span>
|
||||
|
||||
<span class="c1"># Util function for loading point clouds|</span>
|
||||
<span class="kn">import</span> <span class="nn">numpy</span> <span class="k">as</span> <span class="nn">np</span>
|
||||
|
||||
<span class="c1"># Data structures and functions for rendering</span>
|
||||
<span class="kn">from</span> <span class="nn">pytorch3d.structures</span> <span class="k">import</span> <span class="n">Pointclouds</span>
|
||||
<span class="kn">from</span> <span class="nn">pytorch3d.vis.plotly_vis</span> <span class="k">import</span> <span class="n">AxisArgs</span><span class="p">,</span> <span class="n">plot_batch_individually</span><span class="p">,</span> <span class="n">plot_scene</span>
|
||||
<span class="kn">from</span> <span class="nn">pytorch3d.renderer</span> <span class="k">import</span> <span class="p">(</span>
|
||||
<span class="n">look_at_view_transform</span><span class="p">,</span>
|
||||
<span class="n">FoVOrthographicCameras</span><span class="p">,</span>
|
||||
<span class="n">PointsRasterizationSettings</span><span class="p">,</span>
|
||||
<span class="n">PointsRenderer</span><span class="p">,</span>
|
||||
<span class="n">PulsarPointsRenderer</span><span class="p">,</span>
|
||||
<span class="n">PointsRasterizer</span><span class="p">,</span>
|
||||
<span class="n">AlphaCompositor</span><span class="p">,</span>
|
||||
<span class="n">NormWeightedCompositor</span>
|
||||
<span class="p">)</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
|
||||
</div><div class="inner_cell">
|
||||
<div class="text_cell_render border-box-sizing rendered_html">
|
||||
<h3 id="Load-a-point-cloud-and-corresponding-colors">Load a point cloud and corresponding colors<a class="anchor-link" href="#Load-a-point-cloud-and-corresponding-colors">¶</a></h3><p>Load and create a <strong>Point Cloud</strong> object.</p>
|
||||
<p><strong>Pointclouds</strong> is a data structure provided in PyTorch3D for working with batches of point clouds of different sizes.</p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
|
||||
</div><div class="inner_cell">
|
||||
<div class="text_cell_render border-box-sizing rendered_html">
|
||||
<p>If running this notebook using <strong>Google Colab</strong>, run the following cell to fetch the pointcloud data and save it at the path <code>data/PittsburghBridge</code>:
|
||||
If running locally, the data is already available at the correct path.</p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing code_cell rendered">
|
||||
<div class="input">
|
||||
<div class="prompt input_prompt">In [ ]:</div>
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="o">!</span>mkdir -p data/PittsburghBridge
|
||||
<span class="o">!</span>wget -P data/PittsburghBridge https://dl.fbaipublicfiles.com/pytorch3d/data/PittsburghBridge/pointcloud.npz
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing code_cell rendered">
|
||||
<div class="input">
|
||||
<div class="prompt input_prompt">In [ ]:</div>
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="c1"># Setup</span>
|
||||
<span class="k">if</span> <span class="n">torch</span><span class="o">.</span><span class="n">cuda</span><span class="o">.</span><span class="n">is_available</span><span class="p">():</span>
|
||||
<span class="n">device</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">device</span><span class="p">(</span><span class="s2">"cuda:0"</span><span class="p">)</span>
|
||||
<span class="n">torch</span><span class="o">.</span><span class="n">cuda</span><span class="o">.</span><span class="n">set_device</span><span class="p">(</span><span class="n">device</span><span class="p">)</span>
|
||||
<span class="k">else</span><span class="p">:</span>
|
||||
<span class="n">device</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">device</span><span class="p">(</span><span class="s2">"cpu"</span><span class="p">)</span>
|
||||
|
||||
<span class="c1"># Set paths</span>
|
||||
<span class="n">DATA_DIR</span> <span class="o">=</span> <span class="s2">"./data"</span>
|
||||
<span class="n">obj_filename</span> <span class="o">=</span> <span class="n">os</span><span class="o">.</span><span class="n">path</span><span class="o">.</span><span class="n">join</span><span class="p">(</span><span class="n">DATA_DIR</span><span class="p">,</span> <span class="s2">"PittsburghBridge/pointcloud.npz"</span><span class="p">)</span>
|
||||
|
||||
<span class="c1"># Load point cloud</span>
|
||||
<span class="n">pointcloud</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">load</span><span class="p">(</span><span class="n">obj_filename</span><span class="p">)</span>
|
||||
<span class="n">verts</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">Tensor</span><span class="p">(</span><span class="n">pointcloud</span><span class="p">[</span><span class="s1">'verts'</span><span class="p">])</span><span class="o">.</span><span class="n">to</span><span class="p">(</span><span class="n">device</span><span class="p">)</span>
|
||||
|
||||
<span class="n">rgb</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">Tensor</span><span class="p">(</span><span class="n">pointcloud</span><span class="p">[</span><span class="s1">'rgb'</span><span class="p">])</span><span class="o">.</span><span class="n">to</span><span class="p">(</span><span class="n">device</span><span class="p">)</span>
|
||||
|
||||
<span class="n">point_cloud</span> <span class="o">=</span> <span class="n">Pointclouds</span><span class="p">(</span><span class="n">points</span><span class="o">=</span><span class="p">[</span><span class="n">verts</span><span class="p">],</span> <span class="n">features</span><span class="o">=</span><span class="p">[</span><span class="n">rgb</span><span class="p">])</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
|
||||
</div><div class="inner_cell">
|
||||
<div class="text_cell_render border-box-sizing rendered_html">
|
||||
<h2 id="Create-a-renderer">Create a renderer<a class="anchor-link" href="#Create-a-renderer">¶</a></h2><p>A renderer in PyTorch3D is composed of a <strong>rasterizer</strong> and a <strong>shader</strong> which each have a number of subcomponents such as a <strong>camera</strong> (orthgraphic/perspective). Here we initialize some of these components and use default values for the rest.</p>
|
||||
<p>In this example we will first create a <strong>renderer</strong> which uses an <strong>orthographic camera</strong>, and applies <strong>alpha compositing</strong>. Then we learn how to vary different components using the modular API.</p>
|
||||
<p>[1] <a href="https://arxiv.org/abs/1912.08804">SynSin: End-to-end View Synthesis from a Single Image.</a> Olivia Wiles, Georgia Gkioxari, Richard Szeliski, Justin Johnson. CVPR 2020.</p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing code_cell rendered">
|
||||
<div class="input">
|
||||
<div class="prompt input_prompt">In [ ]:</div>
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="c1"># Initialize a camera.</span>
|
||||
<span class="n">R</span><span class="p">,</span> <span class="n">T</span> <span class="o">=</span> <span class="n">look_at_view_transform</span><span class="p">(</span><span class="mi">20</span><span class="p">,</span> <span class="mi">10</span><span class="p">,</span> <span class="mi">0</span><span class="p">)</span>
|
||||
<span class="n">cameras</span> <span class="o">=</span> <span class="n">FoVOrthographicCameras</span><span class="p">(</span><span class="n">device</span><span class="o">=</span><span class="n">device</span><span class="p">,</span> <span class="n">R</span><span class="o">=</span><span class="n">R</span><span class="p">,</span> <span class="n">T</span><span class="o">=</span><span class="n">T</span><span class="p">,</span> <span class="n">znear</span><span class="o">=</span><span class="mf">0.01</span><span class="p">)</span>
|
||||
|
||||
<span class="c1"># Define the settings for rasterization and shading. Here we set the output image to be of size</span>
|
||||
<span class="c1"># 512x512. As we are rendering images for visualization purposes only we will set faces_per_pixel=1</span>
|
||||
<span class="c1"># and blur_radius=0.0. Refer to raster_points.py for explanations of these parameters. </span>
|
||||
<span class="n">raster_settings</span> <span class="o">=</span> <span class="n">PointsRasterizationSettings</span><span class="p">(</span>
|
||||
<span class="n">image_size</span><span class="o">=</span><span class="mi">512</span><span class="p">,</span>
|
||||
<span class="n">radius</span> <span class="o">=</span> <span class="mf">0.003</span><span class="p">,</span>
|
||||
<span class="n">points_per_pixel</span> <span class="o">=</span> <span class="mi">10</span>
|
||||
<span class="p">)</span>
|
||||
|
||||
|
||||
<span class="c1"># Create a points renderer by compositing points using an alpha compositor (nearer points</span>
|
||||
<span class="c1"># are weighted more heavily). See [1] for an explanation.</span>
|
||||
<span class="n">rasterizer</span> <span class="o">=</span> <span class="n">PointsRasterizer</span><span class="p">(</span><span class="n">cameras</span><span class="o">=</span><span class="n">cameras</span><span class="p">,</span> <span class="n">raster_settings</span><span class="o">=</span><span class="n">raster_settings</span><span class="p">)</span>
|
||||
<span class="n">renderer</span> <span class="o">=</span> <span class="n">PointsRenderer</span><span class="p">(</span>
|
||||
<span class="n">rasterizer</span><span class="o">=</span><span class="n">rasterizer</span><span class="p">,</span>
|
||||
<span class="n">compositor</span><span class="o">=</span><span class="n">AlphaCompositor</span><span class="p">()</span>
|
||||
<span class="p">)</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing code_cell rendered">
|
||||
<div class="input">
|
||||
<div class="prompt input_prompt">In [ ]:</div>
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="n">images</span> <span class="o">=</span> <span class="n">renderer</span><span class="p">(</span><span class="n">point_cloud</span><span class="p">)</span>
|
||||
<span class="n">plt</span><span class="o">.</span><span class="n">figure</span><span class="p">(</span><span class="n">figsize</span><span class="o">=</span><span class="p">(</span><span class="mi">10</span><span class="p">,</span> <span class="mi">10</span><span class="p">))</span>
|
||||
<span class="n">plt</span><span class="o">.</span><span class="n">imshow</span><span class="p">(</span><span class="n">images</span><span class="p">[</span><span class="mi">0</span><span class="p">,</span> <span class="o">...</span><span class="p">,</span> <span class="p">:</span><span class="mi">3</span><span class="p">]</span><span class="o">.</span><span class="n">cpu</span><span class="p">()</span><span class="o">.</span><span class="n">numpy</span><span class="p">())</span>
|
||||
<span class="n">plt</span><span class="o">.</span><span class="n">grid</span><span class="p">(</span><span class="s2">"off"</span><span class="p">)</span>
|
||||
<span class="n">plt</span><span class="o">.</span><span class="n">axis</span><span class="p">(</span><span class="s2">"off"</span><span class="p">);</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
|
||||
</div><div class="inner_cell">
|
||||
<div class="text_cell_render border-box-sizing rendered_html">
|
||||
<p>We will now modify the <strong>renderer</strong> to use <strong>alpha compositing</strong> with a set background color.</p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing code_cell rendered">
|
||||
<div class="input">
|
||||
<div class="prompt input_prompt">In [ ]:</div>
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="n">renderer</span> <span class="o">=</span> <span class="n">PointsRenderer</span><span class="p">(</span>
|
||||
<span class="n">rasterizer</span><span class="o">=</span><span class="n">rasterizer</span><span class="p">,</span>
|
||||
<span class="c1"># Pass in background_color to the alpha compositor, setting the background color </span>
|
||||
<span class="c1"># to the 3 item tuple, representing rgb on a scale of 0 -> 1, in this case blue</span>
|
||||
<span class="n">compositor</span><span class="o">=</span><span class="n">AlphaCompositor</span><span class="p">(</span><span class="n">background_color</span><span class="o">=</span><span class="p">(</span><span class="mi">0</span><span class="p">,</span> <span class="mi">0</span><span class="p">,</span> <span class="mi">1</span><span class="p">))</span>
|
||||
<span class="p">)</span>
|
||||
<span class="n">images</span> <span class="o">=</span> <span class="n">renderer</span><span class="p">(</span><span class="n">point_cloud</span><span class="p">)</span>
|
||||
|
||||
<span class="n">plt</span><span class="o">.</span><span class="n">figure</span><span class="p">(</span><span class="n">figsize</span><span class="o">=</span><span class="p">(</span><span class="mi">10</span><span class="p">,</span> <span class="mi">10</span><span class="p">))</span>
|
||||
<span class="n">plt</span><span class="o">.</span><span class="n">imshow</span><span class="p">(</span><span class="n">images</span><span class="p">[</span><span class="mi">0</span><span class="p">,</span> <span class="o">...</span><span class="p">,</span> <span class="p">:</span><span class="mi">3</span><span class="p">]</span><span class="o">.</span><span class="n">cpu</span><span class="p">()</span><span class="o">.</span><span class="n">numpy</span><span class="p">())</span>
|
||||
<span class="n">plt</span><span class="o">.</span><span class="n">grid</span><span class="p">(</span><span class="s2">"off"</span><span class="p">)</span>
|
||||
<span class="n">plt</span><span class="o">.</span><span class="n">axis</span><span class="p">(</span><span class="s2">"off"</span><span class="p">);</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
|
||||
</div><div class="inner_cell">
|
||||
<div class="text_cell_render border-box-sizing rendered_html">
|
||||
<p>We will now create a <strong>renderer</strong> which uses an <strong>orthographic camera</strong> and applies <strong>weighted compositing</strong>.</p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing code_cell rendered">
|
||||
<div class="input">
|
||||
<div class="prompt input_prompt">In [ ]:</div>
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="c1"># Initialize a camera.</span>
|
||||
<span class="n">R</span><span class="p">,</span> <span class="n">T</span> <span class="o">=</span> <span class="n">look_at_view_transform</span><span class="p">(</span><span class="mi">20</span><span class="p">,</span> <span class="mi">10</span><span class="p">,</span> <span class="mi">0</span><span class="p">)</span>
|
||||
<span class="n">cameras</span> <span class="o">=</span> <span class="n">FoVOrthographicCameras</span><span class="p">(</span><span class="n">device</span><span class="o">=</span><span class="n">device</span><span class="p">,</span> <span class="n">R</span><span class="o">=</span><span class="n">R</span><span class="p">,</span> <span class="n">T</span><span class="o">=</span><span class="n">T</span><span class="p">,</span> <span class="n">znear</span><span class="o">=</span><span class="mf">0.01</span><span class="p">)</span>
|
||||
|
||||
<span class="c1"># Define the settings for rasterization and shading. Here we set the output image to be of size</span>
|
||||
<span class="c1"># 512x512. As we are rendering images for visualization purposes only we will set faces_per_pixel=1</span>
|
||||
<span class="c1"># and blur_radius=0.0. Refer to rasterize_points.py for explanations of these parameters. </span>
|
||||
<span class="n">raster_settings</span> <span class="o">=</span> <span class="n">PointsRasterizationSettings</span><span class="p">(</span>
|
||||
<span class="n">image_size</span><span class="o">=</span><span class="mi">512</span><span class="p">,</span>
|
||||
<span class="n">radius</span> <span class="o">=</span> <span class="mf">0.003</span><span class="p">,</span>
|
||||
<span class="n">points_per_pixel</span> <span class="o">=</span> <span class="mi">10</span>
|
||||
<span class="p">)</span>
|
||||
|
||||
|
||||
<span class="c1"># Create a points renderer by compositing points using an weighted compositor (3D points are</span>
|
||||
<span class="c1"># weighted according to their distance to a pixel and accumulated using a weighted sum)</span>
|
||||
<span class="n">renderer</span> <span class="o">=</span> <span class="n">PointsRenderer</span><span class="p">(</span>
|
||||
<span class="n">rasterizer</span><span class="o">=</span><span class="n">PointsRasterizer</span><span class="p">(</span><span class="n">cameras</span><span class="o">=</span><span class="n">cameras</span><span class="p">,</span> <span class="n">raster_settings</span><span class="o">=</span><span class="n">raster_settings</span><span class="p">),</span>
|
||||
<span class="n">compositor</span><span class="o">=</span><span class="n">NormWeightedCompositor</span><span class="p">()</span>
|
||||
<span class="p">)</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing code_cell rendered">
|
||||
<div class="input">
|
||||
<div class="prompt input_prompt">In [ ]:</div>
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="n">images</span> <span class="o">=</span> <span class="n">renderer</span><span class="p">(</span><span class="n">point_cloud</span><span class="p">)</span>
|
||||
<span class="n">plt</span><span class="o">.</span><span class="n">figure</span><span class="p">(</span><span class="n">figsize</span><span class="o">=</span><span class="p">(</span><span class="mi">10</span><span class="p">,</span> <span class="mi">10</span><span class="p">))</span>
|
||||
<span class="n">plt</span><span class="o">.</span><span class="n">imshow</span><span class="p">(</span><span class="n">images</span><span class="p">[</span><span class="mi">0</span><span class="p">,</span> <span class="o">...</span><span class="p">,</span> <span class="p">:</span><span class="mi">3</span><span class="p">]</span><span class="o">.</span><span class="n">cpu</span><span class="p">()</span><span class="o">.</span><span class="n">numpy</span><span class="p">())</span>
|
||||
<span class="n">plt</span><span class="o">.</span><span class="n">grid</span><span class="p">(</span><span class="s2">"off"</span><span class="p">)</span>
|
||||
<span class="n">plt</span><span class="o">.</span><span class="n">axis</span><span class="p">(</span><span class="s2">"off"</span><span class="p">);</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
|
||||
</div><div class="inner_cell">
|
||||
<div class="text_cell_render border-box-sizing rendered_html">
|
||||
<p>We will now modify the <strong>renderer</strong> to use <strong>weighted compositing</strong> with a set background color.</p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing code_cell rendered">
|
||||
<div class="input">
|
||||
<div class="prompt input_prompt">In [ ]:</div>
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="n">renderer</span> <span class="o">=</span> <span class="n">PointsRenderer</span><span class="p">(</span>
|
||||
<span class="n">rasterizer</span><span class="o">=</span><span class="n">PointsRasterizer</span><span class="p">(</span><span class="n">cameras</span><span class="o">=</span><span class="n">cameras</span><span class="p">,</span> <span class="n">raster_settings</span><span class="o">=</span><span class="n">raster_settings</span><span class="p">),</span>
|
||||
<span class="c1"># Pass in background_color to the norm weighted compositor, setting the background color </span>
|
||||
<span class="c1"># to the 3 item tuple, representing rgb on a scale of 0 -> 1, in this case red</span>
|
||||
<span class="n">compositor</span><span class="o">=</span><span class="n">NormWeightedCompositor</span><span class="p">(</span><span class="n">background_color</span><span class="o">=</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span><span class="mi">0</span><span class="p">,</span><span class="mi">0</span><span class="p">))</span>
|
||||
<span class="p">)</span>
|
||||
<span class="n">images</span> <span class="o">=</span> <span class="n">renderer</span><span class="p">(</span><span class="n">point_cloud</span><span class="p">)</span>
|
||||
<span class="n">plt</span><span class="o">.</span><span class="n">figure</span><span class="p">(</span><span class="n">figsize</span><span class="o">=</span><span class="p">(</span><span class="mi">10</span><span class="p">,</span> <span class="mi">10</span><span class="p">))</span>
|
||||
<span class="n">plt</span><span class="o">.</span><span class="n">imshow</span><span class="p">(</span><span class="n">images</span><span class="p">[</span><span class="mi">0</span><span class="p">,</span> <span class="o">...</span><span class="p">,</span> <span class="p">:</span><span class="mi">3</span><span class="p">]</span><span class="o">.</span><span class="n">cpu</span><span class="p">()</span><span class="o">.</span><span class="n">numpy</span><span class="p">())</span>
|
||||
<span class="n">plt</span><span class="o">.</span><span class="n">grid</span><span class="p">(</span><span class="s2">"off"</span><span class="p">)</span>
|
||||
<span class="n">plt</span><span class="o">.</span><span class="n">axis</span><span class="p">(</span><span class="s2">"off"</span><span class="p">);</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
|
||||
</div><div class="inner_cell">
|
||||
<div class="text_cell_render border-box-sizing rendered_html">
|
||||
<h2 id="Using-the-pulsar-backend">Using the pulsar backend<a class="anchor-link" href="#Using-the-pulsar-backend">¶</a></h2><p>Switching to the pulsar backend is easy! The pulsar backend has a compositor built-in, so the <code>compositor</code> argument is not required when creating it (a warning will be displayed if you provide it nevertheless). It pre-allocates memory on the rendering device, that's why it needs the <code>n_channels</code> at construction time.</p>
|
||||
<p>All parameters for the renderer forward function except the background color are batch-wise (in this example, <code>gamma</code>), so you have to provide as many values as you have examples in your batch. The background color is optional and by default set to all zeros. You can find a detailed explanation of how gamma influences the rendering function in the paper <a href="https://arxiv.org/pdf/2004.07484.pdf">Fast Differentiable Raycasting for Neural Rendering using Sphere-based Representations</a>.</p>
|
||||
<p>You can also use the pulsar backend's <code>native</code> interface, which already provides access to point opacity. The native backend can be imported from <code>pytorch3d.renderer.points.pulsar</code>; you can find examples for this in the folder <code>docs/examples</code> (see also the sketch after the next cell).</p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing code_cell rendered">
|
||||
<div class="input">
|
||||
<div class="prompt input_prompt">In [ ]:</div>
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="n">renderer</span> <span class="o">=</span> <span class="n">PulsarPointsRenderer</span><span class="p">(</span>
|
||||
<span class="n">rasterizer</span><span class="o">=</span><span class="n">PointsRasterizer</span><span class="p">(</span><span class="n">cameras</span><span class="o">=</span><span class="n">cameras</span><span class="p">,</span> <span class="n">raster_settings</span><span class="o">=</span><span class="n">raster_settings</span><span class="p">),</span>
|
||||
<span class="n">n_channels</span><span class="o">=</span><span class="mi">4</span>
|
||||
<span class="p">)</span><span class="o">.</span><span class="n">to</span><span class="p">(</span><span class="n">device</span><span class="p">)</span>
|
||||
|
||||
<span class="n">images</span> <span class="o">=</span> <span class="n">renderer</span><span class="p">(</span><span class="n">point_cloud</span><span class="p">,</span> <span class="n">gamma</span><span class="o">=</span><span class="p">(</span><span class="mf">1e-4</span><span class="p">,),</span>
|
||||
<span class="n">bg_col</span><span class="o">=</span><span class="n">torch</span><span class="o">.</span><span class="n">tensor</span><span class="p">([</span><span class="mf">0.0</span><span class="p">,</span> <span class="mf">1.0</span><span class="p">,</span> <span class="mf">0.0</span><span class="p">,</span> <span class="mf">1.0</span><span class="p">],</span> <span class="n">dtype</span><span class="o">=</span><span class="n">torch</span><span class="o">.</span><span class="n">float32</span><span class="p">,</span> <span class="n">device</span><span class="o">=</span><span class="n">device</span><span class="p">))</span>
|
||||
<span class="n">plt</span><span class="o">.</span><span class="n">figure</span><span class="p">(</span><span class="n">figsize</span><span class="o">=</span><span class="p">(</span><span class="mi">10</span><span class="p">,</span> <span class="mi">10</span><span class="p">))</span>
|
||||
<span class="n">plt</span><span class="o">.</span><span class="n">imshow</span><span class="p">(</span><span class="n">images</span><span class="p">[</span><span class="mi">0</span><span class="p">,</span> <span class="o">...</span><span class="p">,</span> <span class="p">:</span><span class="mi">3</span><span class="p">]</span><span class="o">.</span><span class="n">cpu</span><span class="p">()</span><span class="o">.</span><span class="n">numpy</span><span class="p">())</span>
|
||||
<span class="n">plt</span><span class="o">.</span><span class="n">grid</span><span class="p">(</span><span class="s2">"off"</span><span class="p">)</span>
|
||||
<span class="n">plt</span><span class="o">.</span><span class="n">axis</span><span class="p">(</span><span class="s2">"off"</span><span class="p">);</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
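<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div><div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<p>The <code>native</code> pulsar interface mentioned above is not demonstrated in this tutorial. The cell below is only a rough sketch modeled on the basic example in <code>docs/examples</code>: the constructor signature, the 8-value <code>cam_params</code> layout (position, axis-angle rotation, focal length, sensor width), the <code>opacity</code> keyword and the camera placement are assumptions that may differ between PyTorch3D versions, so refer to <code>docs/examples</code> for working code.</p>
</div>
</div>
</div>
<div class="cell border-box-sizing code_cell rendered">
<div class="input">
<div class="prompt input_prompt">In [ ]:</div>
<div class="inner_cell">
<div class="input_area">
<div class="highlight hl-ipython3"><pre><span></span># Sketch only (see the note above); the exact arguments are assumptions.
from pytorch3d.renderer.points.pulsar import Renderer as NativePulsarRenderer

n_points = verts.shape[0]
# Pre-allocate a renderer for up to n_points spheres at 512x512 resolution.
native_renderer = NativePulsarRenderer(512, 512, n_points, n_channels=3).to(device)

# Per-point radii and opacities; per-point opacity is what the native interface adds.
vert_rad = torch.full((n_points,), 0.003, device=device)
vert_opy = torch.full((n_points,), 0.5, device=device)

# Assumed camera layout: [pos_x, pos_y, pos_z, rot_x, rot_y, rot_z, focal_length, sensor_width].
# These values are illustrative and not tuned for this scene.
cam_params = torch.tensor([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 5.0, 2.0], device=device)

image = native_renderer(verts, rgb, vert_rad, cam_params, 1.0e-4, 45.0, opacity=vert_opy)
plt.figure(figsize=(10, 10))
plt.imshow(image.cpu().numpy())
plt.axis("off");
</pre></div>
</div>
</div>
</div>
</div>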
|
||||
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
|
||||
</div><div class="inner_cell">
|
||||
<div class="text_cell_render border-box-sizing rendered_html">
|
||||
<h3 id="View-pointclouds-in-Plotly-figures">View pointclouds in Plotly figures<a class="anchor-link" href="#View-pointclouds-in-Plotly-figures">¶</a></h3><p>Here we use the PyTorch3D function <code>plot_scene</code> to render the pointcloud in a Plotly figure. <code>plot_scene</code> returns a plotly figure with trace and subplots defined by the input.</p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing code_cell rendered">
|
||||
<div class="input">
|
||||
<div class="prompt input_prompt">In [ ]:</div>
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="n">plot_scene</span><span class="p">({</span>
|
||||
<span class="s2">"Pointcloud"</span><span class="p">:</span> <span class="p">{</span>
|
||||
<span class="s2">"person"</span><span class="p">:</span> <span class="n">point_cloud</span>
|
||||
<span class="p">}</span>
|
||||
<span class="p">})</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
|
||||
</div><div class="inner_cell">
|
||||
<div class="text_cell_render border-box-sizing rendered_html">
|
||||
<p>We will now render a batch of pointclouds. The first pointcloud is the same as above, and the second is all-black and offset by 2 in all dimensions so we can see them on the same plot.</p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing code_cell rendered">
|
||||
<div class="input">
|
||||
<div class="prompt input_prompt">In [ ]:</div>
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="n">point_cloud_batch</span> <span class="o">=</span> <span class="n">Pointclouds</span><span class="p">(</span><span class="n">points</span><span class="o">=</span><span class="p">[</span><span class="n">verts</span><span class="p">,</span> <span class="n">verts</span> <span class="o">+</span> <span class="mi">2</span><span class="p">],</span> <span class="n">features</span><span class="o">=</span><span class="p">[</span><span class="n">rgb</span><span class="p">,</span> <span class="n">torch</span><span class="o">.</span><span class="n">zeros_like</span><span class="p">(</span><span class="n">rgb</span><span class="p">)])</span>
|
||||
<span class="c1"># render both in the same plot in different traces</span>
|
||||
<span class="n">fig</span> <span class="o">=</span> <span class="n">plot_scene</span><span class="p">({</span>
|
||||
<span class="s2">"Pointcloud"</span><span class="p">:</span> <span class="p">{</span>
|
||||
<span class="s2">"person"</span><span class="p">:</span> <span class="n">point_cloud_batch</span><span class="p">[</span><span class="mi">0</span><span class="p">],</span>
|
||||
<span class="s2">"person2"</span><span class="p">:</span> <span class="n">point_cloud_batch</span><span class="p">[</span><span class="mi">1</span><span class="p">]</span>
|
||||
<span class="p">}</span>
|
||||
<span class="p">})</span>
|
||||
<span class="n">fig</span><span class="o">.</span><span class="n">show</span><span class="p">()</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing code_cell rendered">
|
||||
<div class="input">
|
||||
<div class="prompt input_prompt">In [ ]:</div>
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="c1"># render both in the same plot in one trace</span>
|
||||
<span class="n">fig</span> <span class="o">=</span> <span class="n">plot_scene</span><span class="p">({</span>
|
||||
<span class="s2">"Pointcloud"</span><span class="p">:</span> <span class="p">{</span>
|
||||
<span class="s2">"2 people"</span><span class="p">:</span> <span class="n">point_cloud_batch</span>
|
||||
<span class="p">}</span>
|
||||
<span class="p">})</span>
|
||||
<span class="n">fig</span><span class="o">.</span><span class="n">show</span><span class="p">()</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
|
||||
</div><div class="inner_cell">
|
||||
<div class="text_cell_render border-box-sizing rendered_html">
|
||||
<p>For batches, we can also use <code>plot_batch_individually</code> to avoid constructing the scene dictionary ourselves.</p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing code_cell rendered">
|
||||
<div class="input">
|
||||
<div class="prompt input_prompt">In [ ]:</div>
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="c1"># render both in 1 row in different subplots</span>
|
||||
<span class="n">fig2</span> <span class="o">=</span> <span class="n">plot_batch_individually</span><span class="p">(</span><span class="n">point_cloud_batch</span><span class="p">,</span> <span class="n">ncols</span><span class="o">=</span><span class="mi">2</span><span class="p">)</span>
|
||||
<span class="n">fig2</span><span class="o">.</span><span class="n">show</span><span class="p">()</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing code_cell rendered">
|
||||
<div class="input">
|
||||
<div class="prompt input_prompt">In [ ]:</div>
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="c1"># modify the plotly figure height and width</span>
|
||||
<span class="n">fig2</span><span class="o">.</span><span class="n">update_layout</span><span class="p">(</span><span class="n">height</span><span class="o">=</span><span class="mi">500</span><span class="p">,</span> <span class="n">width</span><span class="o">=</span><span class="mi">500</span><span class="p">)</span>
|
||||
<span class="n">fig2</span><span class="o">.</span><span class="n">show</span><span class="p">()</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
|
||||
</div><div class="inner_cell">
|
||||
<div class="text_cell_render border-box-sizing rendered_html">
|
||||
<p>We can also modify the axis arguments and axis backgrounds for either function, and title our plots in <code>plot_batch_individually</code>.</p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing code_cell rendered">
|
||||
<div class="input">
|
||||
<div class="prompt input_prompt">In [ ]:</div>
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="n">fig3</span> <span class="o">=</span> <span class="n">plot_batch_individually</span><span class="p">(</span>
|
||||
<span class="n">point_cloud_batch</span><span class="p">,</span>
|
||||
<span class="n">xaxis</span><span class="o">=</span><span class="p">{</span><span class="s2">"backgroundcolor"</span><span class="p">:</span><span class="s2">"rgb(200, 200, 230)"</span><span class="p">},</span>
|
||||
<span class="n">yaxis</span><span class="o">=</span><span class="p">{</span><span class="s2">"backgroundcolor"</span><span class="p">:</span><span class="s2">"rgb(230, 200, 200)"</span><span class="p">},</span>
|
||||
<span class="n">zaxis</span><span class="o">=</span><span class="p">{</span><span class="s2">"backgroundcolor"</span><span class="p">:</span><span class="s2">"rgb(200, 230, 200)"</span><span class="p">},</span>
|
||||
<span class="n">subplot_titles</span><span class="o">=</span><span class="p">[</span><span class="s2">"Pointcloud1"</span><span class="p">,</span> <span class="s2">"Pointcloud2"</span><span class="p">],</span> <span class="c1"># this should have a title for each subplot, titles can be ""</span>
|
||||
<span class="n">axis_args</span><span class="o">=</span><span class="n">AxisArgs</span><span class="p">(</span><span class="n">showgrid</span><span class="o">=</span><span class="kc">True</span><span class="p">))</span>
|
||||
<span class="n">fig3</span><span class="o">.</span><span class="n">show</span><span class="p">()</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
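<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div><div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<p>The same axis arguments can be passed to <code>plot_scene</code> as well. The following is a brief sketch reusing the variables defined earlier (the name <code>fig4</code> is just illustrative):</p>
</div>
</div>
</div>
<div class="cell border-box-sizing code_cell rendered">
<div class="input">
<div class="prompt input_prompt">In [ ]:</div>
<div class="inner_cell">
<div class="input_area">
<div class="highlight hl-ipython3"><pre><span></span># Sketch: apply the same axis styling options to plot_scene.
fig4 = plot_scene(
    {"Pointcloud": {"person": point_cloud}},
    xaxis={"backgroundcolor": "rgb(200, 200, 230)"},
    yaxis={"backgroundcolor": "rgb(230, 200, 200)"},
    zaxis={"backgroundcolor": "rgb(200, 230, 200)"},
    axis_args=AxisArgs(showgrid=True),
)
fig4.show()
</pre></div>
</div>
</div>
</div>
</div>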
|
||||
</div></div></div></div></div><footer class="nav-footer" id="footer"><section class="sitemap"><div class="footerSection"><div class="social"><a class="github-button" href="https://github.com/facebookresearch/pytorch3d" data-count-href="https://github.com/facebookresearch/pytorch3d/stargazers" data-show-count="true" data-count-aria-label="# stargazers on GitHub" aria-label="Star PyTorch3D on GitHub">pytorch3d</a></div></div></section><a href="https://opensource.facebook.com/" target="_blank" rel="noreferrer noopener" class="fbOpenSource"><img src="/img/oss_logo.png" alt="Facebook Open Source" width="170" height="45"/></a><section class="copyright">Copyright © 2020 Facebook Inc<br/>Legal:<a href="https://opensource.facebook.com/legal/privacy/" target="_blank" rel="noreferrer noopener">Privacy</a><a href="https://opensource.facebook.com/legal/terms/" target="_blank" rel="noreferrer noopener">Terms</a></section></footer></div></body></html>
|
||||
491 tutorials/render_densepose.html Normal file
@@ -0,0 +1,491 @@
|
||||
<!DOCTYPE html><html lang=""><head><meta charSet="utf-8"/><meta http-equiv="X-UA-Compatible" content="IE=edge"/><title>PyTorch3D · A library for deep learning with 3D data</title><meta name="viewport" content="width=device-width"/><meta name="generator" content="Docusaurus"/><meta name="description" content="A library for deep learning with 3D data"/><meta property="og:title" content="PyTorch3D · A library for deep learning with 3D data"/><meta property="og:type" content="website"/><meta property="og:url" content="https://pytorch3d.org/"/><meta property="og:description" content="A library for deep learning with 3D data"/><meta property="og:image" content="https://pytorch3d.org/img/pytorch3dlogoicon.svg"/><meta name="twitter:card" content="summary"/><meta name="twitter:image" content="https://pytorch3d.org/img/pytorch3dlogoicon.svg"/><link rel="shortcut icon" href="/img/pytorch3dfavicon.png"/><link rel="stylesheet" href="//cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/styles/default.min.css"/><script>
|
||||
(function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
|
||||
(i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
|
||||
m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
|
||||
})(window,document,'script','https://www.google-analytics.com/analytics.js','ga');
|
||||
|
||||
ga('create', 'UA-157376881-1', 'auto');
|
||||
ga('send', 'pageview');
|
||||
</script><script type="text/javascript" src="https://buttons.github.io/buttons.js"></script><script src="/js/scrollSpy.js"></script><link rel="stylesheet" href="/css/main.css"/><script src="/js/codetabs.js"></script></head><body><div class="fixedHeaderContainer"><div class="headerWrapper wrapper"><header><a href="/"><img class="logo" src="/img/pytorch3dfavicon.png" alt="PyTorch3D"/><h2 class="headerTitleWithLogo">PyTorch3D</h2></a><div class="navigationWrapper navigationSlider"><nav class="slidingNav"><ul class="nav-site nav-site-internal"><li class=""><a href="/docs/why_pytorch3d" target="_self">Docs</a></li><li class=""><a href="/tutorials" target="_self">Tutorials</a></li><li class=""><a href="https://pytorch3d.readthedocs.io/" target="_self">API</a></li><li class=""><a href="https://github.com/facebookresearch/pytorch3d" target="_self">GitHub</a></li></ul></nav></div></header></div></div><div class="navPusher"><div class="docMainWrapper wrapper"><div class="container docsNavContainer" id="docsNav"><nav class="toc"><div class="toggleNav"><section class="navWrapper wrapper"><div class="navBreadcrumb wrapper"><div class="navToggle" id="navToggler"><div class="hamburger-menu"><div class="line1"></div><div class="line2"></div><div class="line3"></div></div></div><h2><i>›</i><span></span></h2><div class="tocToggler" id="tocToggler"><i class="icon-toc"></i></div></div><div class="navGroups"><div class="navGroup"><h3 class="navGroupCategoryTitle">Tutorials</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/">Overview</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">3D operators</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/deform_source_mesh_to_target_mesh">Fit Mesh</a></li><li class="navListItem"><a class="navItem" href="/tutorials/bundle_adjustment">Bundle Adjustment</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Rendering</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/render_textured_meshes">Render Textured Meshes</a></li><li class="navListItem navListItemActive"><a class="navItem" href="/tutorials/render_densepose">Render DensePose Meshes</a></li><li class="navListItem"><a class="navItem" href="/tutorials/render_colored_points">Render Colored Pointclouds</a></li><li class="navListItem"><a class="navItem" href="/tutorials/fit_textured_mesh">Fit a Mesh with Texture via Rendering</a></li><li class="navListItem"><a class="navItem" href="/tutorials/camera_position_optimization_with_differentiable_rendering">Camera Position Optimization with Differentiable Rendering</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Dataloaders</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/dataloaders_ShapeNetCore_R2N2">Data loaders for ShapeNetCore and R2N2</a></li></ul></div></div></section></div><script>
|
||||
var coll = document.getElementsByClassName('collapsible');
|
||||
var checkActiveCategory = true;
|
||||
for (var i = 0; i < coll.length; i++) {
|
||||
var links = coll[i].nextElementSibling.getElementsByTagName('*');
|
||||
if (checkActiveCategory){
|
||||
for (var j = 0; j < links.length; j++) {
|
||||
if (links[j].classList.contains('navListItemActive')){
|
||||
coll[i].nextElementSibling.classList.toggle('hide');
|
||||
coll[i].childNodes[1].classList.toggle('rotate');
|
||||
checkActiveCategory = false;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
coll[i].addEventListener('click', function() {
|
||||
var arrow = this.childNodes[1];
|
||||
arrow.classList.toggle('rotate');
|
||||
var content = this.nextElementSibling;
|
||||
content.classList.toggle('hide');
|
||||
});
|
||||
}
|
||||
|
||||
document.addEventListener('DOMContentLoaded', function() {
|
||||
createToggler('#navToggler', '#docsNav', 'docsSliderActive');
|
||||
createToggler('#tocToggler', 'body', 'tocActive');
|
||||
|
||||
var headings = document.querySelector('.toc-headings');
|
||||
headings && headings.addEventListener('click', function(event) {
|
||||
var el = event.target;
|
||||
while(el !== headings){
|
||||
if (el.tagName === 'A') {
|
||||
document.body.classList.remove('tocActive');
|
||||
break;
|
||||
} else{
|
||||
el = el.parentNode;
|
||||
}
|
||||
}
|
||||
}, false);
|
||||
|
||||
function createToggler(togglerSelector, targetSelector, className) {
|
||||
var toggler = document.querySelector(togglerSelector);
|
||||
var target = document.querySelector(targetSelector);
|
||||
|
||||
if (!toggler) {
|
||||
return;
|
||||
}
|
||||
|
||||
toggler.onclick = function(event) {
|
||||
event.preventDefault();
|
||||
|
||||
target.classList.toggle(className);
|
||||
};
|
||||
}
|
||||
});
|
||||
</script></nav></div><div class="container mainContainer"><div class="wrapper"><div class="tutorialButtonsWrapper"><div class="tutorialButtonWrapper buttonWrapper"><a class="tutorialButton button" download="" href="https://colab.research.google.com/github/facebookresearch/pytorch3d/blob/stable/docs/tutorials/render_densepose.ipynb" target="_blank"><img class="colabButton" align="left" src="/img/colab_icon.png"/>Run in Google Colab</a></div><div class="tutorialButtonWrapper buttonWrapper"><a class="tutorialButton button" download="" href="/files/render_densepose.ipynb" target="_blank"><svg aria-hidden="true" focusable="false" data-prefix="fas" data-icon="file-download" class="svg-inline--fa fa-file-download fa-w-12" role="img" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 384 512"><path fill="currentColor" d="M224 136V0H24C10.7 0 0 10.7 0 24v464c0 13.3 10.7 24 24 24h336c13.3 0 24-10.7 24-24V160H248c-13.2 0-24-10.8-24-24zm76.45 211.36l-96.42 95.7c-6.65 6.61-17.39 6.61-24.04 0l-96.42-95.7C73.42 337.29 80.54 320 94.82 320H160v-80c0-8.84 7.16-16 16-16h32c8.84 0 16 7.16 16 16v80h65.18c14.28 0 21.4 17.29 11.27 27.36zM377 105L279.1 7c-4.5-4.5-10.6-7-17-7H256v128h128v-6.1c0-6.3-2.5-12.4-7-16.9z"></path></svg>Download Tutorial Jupyter Notebook</a></div><div class="tutorialButtonWrapper buttonWrapper"><a class="tutorialButton button" download="" href="/files/render_densepose.py" target="_blank"><svg aria-hidden="true" focusable="false" data-prefix="fas" data-icon="file-download" class="svg-inline--fa fa-file-download fa-w-12" role="img" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 384 512"><path fill="currentColor" d="M224 136V0H24C10.7 0 0 10.7 0 24v464c0 13.3 10.7 24 24 24h336c13.3 0 24-10.7 24-24V160H248c-13.2 0-24-10.8-24-24zm76.45 211.36l-96.42 95.7c-6.65 6.61-17.39 6.61-24.04 0l-96.42-95.7C73.42 337.29 80.54 320 94.82 320H160v-80c0-8.84 7.16-16 16-16h32c8.84 0 16 7.16 16 16v80h65.18c14.28 0 21.4 17.29 11.27 27.36zM377 105L279.1 7c-4.5-4.5-10.6-7-17-7H256v128h128v-6.1c0-6.3-2.5-12.4-7-16.9z"></path></svg>Download Tutorial Source Code</a></div></div><div class="tutorialBody">
|
||||
<script
|
||||
src="https://cdnjs.cloudflare.com/ajax/libs/require.js/2.1.10/require.min.js">
|
||||
</script>
|
||||
<script
|
||||
src="https://cdnjs.cloudflare.com/ajax/libs/jquery/2.0.3/jquery.min.js">
|
||||
</script>
|
||||
<div class="notebook">
|
||||
<div class="cell border-box-sizing code_cell rendered">
|
||||
<div class="input">
|
||||
<div class="prompt input_prompt">In [ ]:</div>
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="c1"># Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
|
||||
</div><div class="inner_cell">
|
||||
<div class="text_cell_render border-box-sizing rendered_html">
|
||||
<h1 id="Render-DensePose">Render DensePose<a class="anchor-link" href="#Render-DensePose">¶</a></h1><p>DensePose refers to dense human pose representation: <a href="https://github.com/facebookresearch/DensePose">https://github.com/facebookresearch/DensePose</a>.
|
||||
In this tutorial, we provide an example of using DensePose data in PyTorch3D.</p>
|
||||
<p>This tutorial shows how to:</p>
|
||||
<ul>
|
||||
<li>load a mesh and textures from DensePose <code>.mat</code> and <code>.pkl</code> files</li>
|
||||
<li>set up a renderer </li>
|
||||
<li>render the mesh </li>
|
||||
<li>vary the rendering settings such as lighting and camera position</li>
|
||||
</ul>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
|
||||
</div><div class="inner_cell">
|
||||
<div class="text_cell_render border-box-sizing rendered_html">
|
||||
<h2 id="Import-modules">Import modules<a class="anchor-link" href="#Import-modules">¶</a></h2>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
|
||||
</div><div class="inner_cell">
|
||||
<div class="text_cell_render border-box-sizing rendered_html">
|
||||
<p>If torch, torchvision and PyTorch3D are not installed, run the following cell:</p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing code_cell rendered">
|
||||
<div class="input">
|
||||
<div class="prompt input_prompt">In [ ]:</div>
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="o">!</span>pip install torch torchvision
|
||||
<span class="kn">import</span> <span class="nn">os</span>
|
||||
<span class="kn">import</span> <span class="nn">sys</span>
|
||||
<span class="kn">import</span> <span class="nn">torch</span>
|
||||
<span class="k">if</span> <span class="n">torch</span><span class="o">.</span><span class="n">__version__</span><span class="o">==</span><span class="s1">'1.6.0+cu101'</span> <span class="ow">and</span> <span class="n">sys</span><span class="o">.</span><span class="n">platform</span><span class="o">.</span><span class="n">startswith</span><span class="p">(</span><span class="s1">'linux'</span><span class="p">):</span>
|
||||
<span class="o">!</span>pip install pytorch3d
|
||||
<span class="k">else</span><span class="p">:</span>
|
||||
<span class="n">need_pytorch3d</span><span class="o">=</span><span class="kc">False</span>
|
||||
<span class="k">try</span><span class="p">:</span>
|
||||
<span class="kn">import</span> <span class="nn">pytorch3d</span>
|
||||
<span class="k">except</span> <span class="n">ModuleNotFoundError</span><span class="p">:</span>
|
||||
<span class="n">need_pytorch3d</span><span class="o">=</span><span class="kc">True</span>
|
||||
<span class="k">if</span> <span class="n">need_pytorch3d</span><span class="p">:</span>
|
||||
<span class="o">!</span>curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz
|
||||
<span class="o">!</span>tar xzf <span class="m">1</span>.10.0.tar.gz
|
||||
<span class="n">os</span><span class="o">.</span><span class="n">environ</span><span class="p">[</span><span class="s2">"CUB_HOME"</span><span class="p">]</span> <span class="o">=</span> <span class="n">os</span><span class="o">.</span><span class="n">getcwd</span><span class="p">()</span> <span class="o">+</span> <span class="s2">"/cub-1.10.0"</span>
|
||||
<span class="o">!</span>pip install <span class="s1">'git+https://github.com/facebookresearch/pytorch3d.git@stable'</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing code_cell rendered">
|
||||
<div class="input">
|
||||
<div class="prompt input_prompt">In [ ]:</div>
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="c1"># We also install chumpy as it is needed to load the SMPL model pickle file.</span>
|
||||
<span class="o">!</span>pip install chumpy
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing code_cell rendered">
|
||||
<div class="input">
|
||||
<div class="prompt input_prompt">In [ ]:</div>
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="kn">import</span> <span class="nn">os</span>
|
||||
<span class="kn">import</span> <span class="nn">torch</span>
|
||||
<span class="kn">import</span> <span class="nn">matplotlib.pyplot</span> <span class="k">as</span> <span class="nn">plt</span>
|
||||
<span class="kn">from</span> <span class="nn">skimage.io</span> <span class="k">import</span> <span class="n">imread</span>
|
||||
<span class="kn">import</span> <span class="nn">numpy</span> <span class="k">as</span> <span class="nn">np</span>
|
||||
|
||||
<span class="c1"># libraries for reading data from files</span>
|
||||
<span class="kn">from</span> <span class="nn">scipy.io</span> <span class="k">import</span> <span class="n">loadmat</span>
|
||||
<span class="kn">from</span> <span class="nn">pytorch3d.io.utils</span> <span class="k">import</span> <span class="n">_read_image</span>
|
||||
<span class="kn">import</span> <span class="nn">pickle</span>
|
||||
|
||||
<span class="c1"># Data structures and functions for rendering</span>
|
||||
<span class="kn">from</span> <span class="nn">pytorch3d.structures</span> <span class="k">import</span> <span class="n">Meshes</span>
|
||||
<span class="kn">from</span> <span class="nn">pytorch3d.renderer</span> <span class="k">import</span> <span class="p">(</span>
|
||||
<span class="n">look_at_view_transform</span><span class="p">,</span>
|
||||
<span class="n">FoVPerspectiveCameras</span><span class="p">,</span>
|
||||
<span class="n">PointLights</span><span class="p">,</span>
|
||||
<span class="n">DirectionalLights</span><span class="p">,</span>
|
||||
<span class="n">Materials</span><span class="p">,</span>
|
||||
<span class="n">RasterizationSettings</span><span class="p">,</span>
|
||||
<span class="n">MeshRenderer</span><span class="p">,</span>
|
||||
<span class="n">MeshRasterizer</span><span class="p">,</span>
|
||||
<span class="n">SoftPhongShader</span><span class="p">,</span>
|
||||
<span class="n">TexturesUV</span>
|
||||
<span class="p">)</span>
|
||||
|
||||
<span class="c1"># add path for demo utils functions </span>
|
||||
<span class="kn">import</span> <span class="nn">sys</span>
|
||||
<span class="kn">import</span> <span class="nn">os</span>
|
||||
<span class="n">sys</span><span class="o">.</span><span class="n">path</span><span class="o">.</span><span class="n">append</span><span class="p">(</span><span class="n">os</span><span class="o">.</span><span class="n">path</span><span class="o">.</span><span class="n">abspath</span><span class="p">(</span><span class="s1">''</span><span class="p">))</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
|
||||
</div><div class="inner_cell">
|
||||
<div class="text_cell_render border-box-sizing rendered_html">
|
||||
<h2 id="Load-the-SMPL-model">Load the SMPL model<a class="anchor-link" href="#Load-the-SMPL-model">¶</a></h2><h4 id="Download-the-SMPL-model">Download the SMPL model<a class="anchor-link" href="#Download-the-SMPL-model">¶</a></h4><ul>
|
||||
<li>Go to <a href="http://smpl.is.tue.mpg.de/downloads">http://smpl.is.tue.mpg.de/downloads</a> and sign up.</li>
|
||||
<li>Download SMPL for Python Users and unzip.</li>
|
||||
<li>Copy the male template file <strong>'models/basicModel_m_lbs_10_207_0_v1.0.0.pkl'</strong> to the data/DensePose/ folder.<ul>
|
||||
<li>rename the file to <strong>'smpl_model.pkl'</strong>, or change the filename string where it is commented in the code below (an example copy command follows this list)</li>
|
||||
</ul>
|
||||
</li>
|
||||
</ul>
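<p>For example, the copy and rename step could look like the following, where the source path is a placeholder for wherever you extracted the SMPL download:</p>
<pre><code>cp /path/to/SMPL/models/basicModel_m_lbs_10_207_0_v1.0.0.pkl data/DensePose/smpl_model.pkl</code></pre>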
|
||||
<p>If running this notebook using Google Colab, run the following cell to fetch the texture and UV data and save them at the correct paths.</p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing code_cell rendered">
|
||||
<div class="input">
|
||||
<div class="prompt input_prompt">In [ ]:</div>
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="c1"># Texture image</span>
|
||||
<span class="o">!</span>wget -P data/DensePose https://raw.githubusercontent.com/facebookresearch/DensePose/master/DensePoseData/demo_data/texture_from_SURREAL.png
|
||||
|
||||
<span class="c1"># UV_processed.mat</span>
|
||||
<span class="o">!</span>wget https://dl.fbaipublicfiles.com/densepose/densepose_uv_data.tar.gz
|
||||
<span class="o">!</span>tar xvf densepose_uv_data.tar.gz -C data/DensePose
|
||||
<span class="o">!</span>rm densepose_uv_data.tar.gz
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
|
||||
</div><div class="inner_cell">
|
||||
<div class="text_cell_render border-box-sizing rendered_html">
|
||||
<p>Load our texture UV data and our SMPL data, with some processing to correct data values and format.</p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing code_cell rendered">
|
||||
<div class="input">
|
||||
<div class="prompt input_prompt">In [ ]:</div>
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="c1"># Setup</span>
|
||||
<span class="k">if</span> <span class="n">torch</span><span class="o">.</span><span class="n">cuda</span><span class="o">.</span><span class="n">is_available</span><span class="p">():</span>
|
||||
<span class="n">device</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">device</span><span class="p">(</span><span class="s2">"cuda:0"</span><span class="p">)</span>
|
||||
<span class="n">torch</span><span class="o">.</span><span class="n">cuda</span><span class="o">.</span><span class="n">set_device</span><span class="p">(</span><span class="n">device</span><span class="p">)</span>
|
||||
<span class="k">else</span><span class="p">:</span>
|
||||
<span class="n">device</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">device</span><span class="p">(</span><span class="s2">"cpu"</span><span class="p">)</span>
|
||||
|
||||
<span class="c1"># Set paths</span>
|
||||
<span class="n">DATA_DIR</span> <span class="o">=</span> <span class="s2">"./data"</span>
|
||||
<span class="n">data_filename</span> <span class="o">=</span> <span class="n">os</span><span class="o">.</span><span class="n">path</span><span class="o">.</span><span class="n">join</span><span class="p">(</span><span class="n">DATA_DIR</span><span class="p">,</span> <span class="s2">"DensePose/UV_Processed.mat"</span><span class="p">)</span>
|
||||
<span class="n">tex_filename</span> <span class="o">=</span> <span class="n">os</span><span class="o">.</span><span class="n">path</span><span class="o">.</span><span class="n">join</span><span class="p">(</span><span class="n">DATA_DIR</span><span class="p">,</span><span class="s2">"DensePose/texture_from_SURREAL.png"</span><span class="p">)</span>
|
||||
<span class="c1"># rename your .pkl file or change this string</span>
|
||||
<span class="n">verts_filename</span> <span class="o">=</span> <span class="n">os</span><span class="o">.</span><span class="n">path</span><span class="o">.</span><span class="n">join</span><span class="p">(</span><span class="n">DATA_DIR</span><span class="p">,</span> <span class="s2">"DensePose/smpl_model.pkl"</span><span class="p">)</span>
|
||||
|
||||
|
||||
<span class="c1"># Load SMPL and texture data</span>
|
||||
<span class="k">with</span> <span class="nb">open</span><span class="p">(</span><span class="n">verts_filename</span><span class="p">,</span> <span class="s1">'rb'</span><span class="p">)</span> <span class="k">as</span> <span class="n">f</span><span class="p">:</span>
|
||||
<span class="n">data</span> <span class="o">=</span> <span class="n">pickle</span><span class="o">.</span><span class="n">load</span><span class="p">(</span><span class="n">f</span><span class="p">,</span> <span class="n">encoding</span><span class="o">=</span><span class="s1">'latin1'</span><span class="p">)</span>
|
||||
<span class="n">v_template</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">Tensor</span><span class="p">(</span><span class="n">data</span><span class="p">[</span><span class="s1">'v_template'</span><span class="p">])</span><span class="o">.</span><span class="n">to</span><span class="p">(</span><span class="n">device</span><span class="p">)</span> <span class="c1"># (6890, 3)</span>
|
||||
<span class="n">ALP_UV</span> <span class="o">=</span> <span class="n">loadmat</span><span class="p">(</span><span class="n">data_filename</span><span class="p">)</span>
|
||||
<span class="n">tex</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">from_numpy</span><span class="p">(</span><span class="n">_read_image</span><span class="p">(</span><span class="n">file_name</span><span class="o">=</span><span class="n">tex_filename</span><span class="p">,</span> <span class="nb">format</span><span class="o">=</span><span class="s1">'RGB'</span><span class="p">)</span> <span class="o">/</span> <span class="mf">255.</span> <span class="p">)</span><span class="o">.</span><span class="n">unsqueeze</span><span class="p">(</span><span class="mi">0</span><span class="p">)</span><span class="o">.</span><span class="n">to</span><span class="p">(</span><span class="n">device</span><span class="p">)</span>
|
||||
|
||||
<span class="n">verts</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">from_numpy</span><span class="p">((</span><span class="n">ALP_UV</span><span class="p">[</span><span class="s2">"All_vertices"</span><span class="p">])</span><span class="o">.</span><span class="n">astype</span><span class="p">(</span><span class="nb">int</span><span class="p">))</span><span class="o">.</span><span class="n">squeeze</span><span class="p">()</span><span class="o">.</span><span class="n">to</span><span class="p">(</span><span class="n">device</span><span class="p">)</span> <span class="c1"># (7829, 1)</span>
|
||||
<span class="n">U</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">Tensor</span><span class="p">(</span><span class="n">ALP_UV</span><span class="p">[</span><span class="s1">'All_U_norm'</span><span class="p">])</span><span class="o">.</span><span class="n">to</span><span class="p">(</span><span class="n">device</span><span class="p">)</span> <span class="c1"># (7829, 1)</span>
|
||||
<span class="n">V</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">Tensor</span><span class="p">(</span><span class="n">ALP_UV</span><span class="p">[</span><span class="s1">'All_V_norm'</span><span class="p">])</span><span class="o">.</span><span class="n">to</span><span class="p">(</span><span class="n">device</span><span class="p">)</span> <span class="c1"># (7829, 1)</span>
|
||||
<span class="n">faces</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">from_numpy</span><span class="p">((</span><span class="n">ALP_UV</span><span class="p">[</span><span class="s1">'All_Faces'</span><span class="p">]</span> <span class="o">-</span> <span class="mi">1</span><span class="p">)</span><span class="o">.</span><span class="n">astype</span><span class="p">(</span><span class="nb">int</span><span class="p">))</span><span class="o">.</span><span class="n">to</span><span class="p">(</span><span class="n">device</span><span class="p">)</span> <span class="c1"># (13774, 3)</span>
|
||||
<span class="n">face_indices</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">Tensor</span><span class="p">(</span><span class="n">ALP_UV</span><span class="p">[</span><span class="s1">'All_FaceIndices'</span><span class="p">])</span><span class="o">.</span><span class="n">squeeze</span><span class="p">()</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing code_cell rendered">
|
||||
<div class="input">
|
||||
<div class="prompt input_prompt">In [ ]:</div>
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="c1"># Display the texture image</span>
|
||||
<span class="n">plt</span><span class="o">.</span><span class="n">figure</span><span class="p">(</span><span class="n">figsize</span><span class="o">=</span><span class="p">(</span><span class="mi">10</span><span class="p">,</span> <span class="mi">10</span><span class="p">))</span>
|
||||
<span class="n">plt</span><span class="o">.</span><span class="n">imshow</span><span class="p">(</span><span class="n">tex</span><span class="o">.</span><span class="n">squeeze</span><span class="p">(</span><span class="mi">0</span><span class="p">)</span><span class="o">.</span><span class="n">cpu</span><span class="p">())</span>
|
||||
<span class="n">plt</span><span class="o">.</span><span class="n">grid</span><span class="p">(</span><span class="s2">"off"</span><span class="p">);</span>
|
||||
<span class="n">plt</span><span class="o">.</span><span class="n">axis</span><span class="p">(</span><span class="s2">"off"</span><span class="p">);</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
|
||||
</div><div class="inner_cell">
|
||||
<div class="text_cell_render border-box-sizing rendered_html">
|
||||
<p>In DensePose, the body mesh is split into 24 parts. In the texture image, the 24 parts are laid out as separate (200, 200) images, one per body part. The convention in DensePose is that each face in the mesh is associated with a body part (given by the face_indices tensor above). The vertex UV values (in the range [0, 1]) for each face are relative to the (200, 200) texture map of the body part that the face belongs to, so they cannot be used directly with the full texture image. We therefore have to offset the vertex UV values according to the body part of the associated face.</p>
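<p>As a minimal sketch of this convention (using a hypothetical helper, <code>part_uv_to_atlas_uv</code>, which is not part of the notebook), the 1-indexed part id selects one (200, 200) tile of the 24-tile atlas, and the per-part UV is shrunk and shifted into that tile:</p>
<pre><code class="language-python">def part_uv_to_atlas_uv(u, v, part, cols=4, rows=6):
    # Hypothetical helper mirroring the loop below: `part` is 1-indexed,
    # as in face_indices; parts 1..6 fill the first column of the atlas.
    i, j = divmod(int(part) - 1, rows)       # column index, row index of the tile
    offset_u, offset_v = i / cols, j / rows  # lower corner of the tile in [0, 1] UV space
    # Shrink the per-part UV into one tile and shift it; V is flipped locally
    # because each part image is stored upside down. (The notebook additionally
    # flips the final V globally: V_norm = 1 - V.)
    return u / cols + offset_u, (1 - v) / rows + offset_v

print(part_uv_to_atlas_uv(0.5, 0.5, part=1))  # falls inside the first (200, 200) tile
</code></pre>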
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing code_cell rendered">
|
||||
<div class="input">
|
||||
<div class="prompt input_prompt">In [ ]:</div>
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="c1"># Map each face to a (u, v) offset</span>
|
||||
<span class="n">offset_per_part</span> <span class="o">=</span> <span class="p">{}</span>
|
||||
<span class="n">already_offset</span> <span class="o">=</span> <span class="nb">set</span><span class="p">()</span>
|
||||
<span class="n">cols</span><span class="p">,</span> <span class="n">rows</span> <span class="o">=</span> <span class="mi">4</span><span class="p">,</span> <span class="mi">6</span>
|
||||
<span class="k">for</span> <span class="n">i</span><span class="p">,</span> <span class="n">u</span> <span class="ow">in</span> <span class="nb">enumerate</span><span class="p">(</span><span class="n">np</span><span class="o">.</span><span class="n">linspace</span><span class="p">(</span><span class="mi">0</span><span class="p">,</span> <span class="mi">1</span><span class="p">,</span> <span class="n">cols</span><span class="p">,</span> <span class="n">endpoint</span><span class="o">=</span><span class="kc">False</span><span class="p">)):</span>
|
||||
<span class="k">for</span> <span class="n">j</span><span class="p">,</span> <span class="n">v</span> <span class="ow">in</span> <span class="nb">enumerate</span><span class="p">(</span><span class="n">np</span><span class="o">.</span><span class="n">linspace</span><span class="p">(</span><span class="mi">0</span><span class="p">,</span> <span class="mi">1</span><span class="p">,</span> <span class="n">rows</span><span class="p">,</span> <span class="n">endpoint</span><span class="o">=</span><span class="kc">False</span><span class="p">)):</span>
|
||||
<span class="n">part</span> <span class="o">=</span> <span class="n">rows</span> <span class="o">*</span> <span class="n">i</span> <span class="o">+</span> <span class="n">j</span> <span class="o">+</span> <span class="mi">1</span> <span class="c1"># parts are 1-indexed in face_indices</span>
|
||||
<span class="n">offset_per_part</span><span class="p">[</span><span class="n">part</span><span class="p">]</span> <span class="o">=</span> <span class="p">(</span><span class="n">u</span><span class="p">,</span> <span class="n">v</span><span class="p">)</span>
|
||||
|
||||
<span class="c1"># iterate over faces and offset the corresponding vertex u and v values</span>
|
||||
<span class="k">for</span> <span class="n">i</span> <span class="ow">in</span> <span class="nb">range</span><span class="p">(</span><span class="nb">len</span><span class="p">(</span><span class="n">faces</span><span class="p">)):</span>
|
||||
<span class="n">face_vert_idxs</span> <span class="o">=</span> <span class="n">faces</span><span class="p">[</span><span class="n">i</span><span class="p">]</span>
|
||||
<span class="n">part</span> <span class="o">=</span> <span class="n">face_indices</span><span class="p">[</span><span class="n">i</span><span class="p">]</span>
|
||||
<span class="n">offset_u</span><span class="p">,</span> <span class="n">offset_v</span> <span class="o">=</span> <span class="n">offset_per_part</span><span class="p">[</span><span class="nb">int</span><span class="p">(</span><span class="n">part</span><span class="o">.</span><span class="n">item</span><span class="p">())]</span>
|
||||
|
||||
<span class="k">for</span> <span class="n">vert_idx</span> <span class="ow">in</span> <span class="n">face_vert_idxs</span><span class="p">:</span>
|
||||
<span class="c1"># vertices are reused, but we don't want to offset multiple times</span>
|
||||
<span class="k">if</span> <span class="n">vert_idx</span><span class="o">.</span><span class="n">item</span><span class="p">()</span> <span class="ow">not</span> <span class="ow">in</span> <span class="n">already_offset</span><span class="p">:</span>
|
||||
<span class="c1"># offset u value</span>
|
||||
<span class="n">U</span><span class="p">[</span><span class="n">vert_idx</span><span class="p">]</span> <span class="o">=</span> <span class="n">U</span><span class="p">[</span><span class="n">vert_idx</span><span class="p">]</span> <span class="o">/</span> <span class="n">cols</span> <span class="o">+</span> <span class="n">offset_u</span>
|
||||
<span class="c1"># offset v value</span>
|
||||
<span class="c1"># this also flips each part locally, as each part is upside down</span>
|
||||
<span class="n">V</span><span class="p">[</span><span class="n">vert_idx</span><span class="p">]</span> <span class="o">=</span> <span class="p">(</span><span class="mi">1</span> <span class="o">-</span> <span class="n">V</span><span class="p">[</span><span class="n">vert_idx</span><span class="p">])</span> <span class="o">/</span> <span class="n">rows</span> <span class="o">+</span> <span class="n">offset_v</span>
|
||||
<span class="c1"># add vertex to our set tracking offsetted vertices</span>
|
||||
<span class="n">already_offset</span><span class="o">.</span><span class="n">add</span><span class="p">(</span><span class="n">vert_idx</span><span class="o">.</span><span class="n">item</span><span class="p">())</span>
|
||||
|
||||
<span class="c1"># invert V values</span>
|
||||
<span class="n">U_norm</span><span class="p">,</span> <span class="n">V_norm</span> <span class="o">=</span> <span class="n">U</span><span class="p">,</span> <span class="mi">1</span> <span class="o">-</span> <span class="n">V</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing code_cell rendered">
|
||||
<div class="input">
|
||||
<div class="prompt input_prompt">In [ ]:</div>
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="c1"># create our verts_uv values</span>
|
||||
<span class="n">verts_uv</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">cat</span><span class="p">([</span><span class="n">U_norm</span><span class="p">[</span><span class="kc">None</span><span class="p">],</span><span class="n">V_norm</span><span class="p">[</span><span class="kc">None</span><span class="p">]],</span> <span class="n">dim</span><span class="o">=</span><span class="mi">2</span><span class="p">)</span> <span class="c1"># (1, 7829, 2)</span>
|
||||
|
||||
<span class="c1"># There are 6890 xyz vertex coordinates but 7829 vertex uv coordinates. </span>
|
||||
<span class="c1"># This is because the same vertex can be shared by multiple faces where each face may correspond to a different body part. </span>
|
||||
<span class="c1"># Therefore when initializing the Meshes class,</span>
|
||||
<span class="c1"># we need to map each of the vertices referenced by the DensePose faces (in verts, which is the "All_vertices" field)</span>
|
||||
<span class="c1"># to the correct xyz coordinate in the SMPL template mesh.</span>
|
||||
<span class="n">v_template_extended</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">stack</span><span class="p">(</span><span class="nb">list</span><span class="p">(</span><span class="nb">map</span><span class="p">(</span><span class="k">lambda</span> <span class="n">vert</span><span class="p">:</span> <span class="n">v_template</span><span class="p">[</span><span class="n">vert</span><span class="o">-</span><span class="mi">1</span><span class="p">],</span> <span class="n">verts</span><span class="p">)))</span><span class="o">.</span><span class="n">unsqueeze</span><span class="p">(</span><span class="mi">0</span><span class="p">)</span><span class="o">.</span><span class="n">to</span><span class="p">(</span><span class="n">device</span><span class="p">)</span> <span class="c1"># (1, 7829, 3)</span>
|
||||
|
||||
<span class="c1"># add a batch dimension to faces</span>
|
||||
<span class="n">faces</span> <span class="o">=</span> <span class="n">faces</span><span class="o">.</span><span class="n">unsqueeze</span><span class="p">(</span><span class="mi">0</span><span class="p">)</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
|
||||
</div><div class="inner_cell">
|
||||
<div class="text_cell_render border-box-sizing rendered_html">
|
||||
<h3 id="Create-our-textured-mesh">Create our textured mesh<a class="anchor-link" href="#Create-our-textured-mesh">¶</a></h3><p><strong>Meshes</strong> is a unique datastructure provided in PyTorch3D for working with batches of meshes of different sizes.</p>
|
||||
<p><strong>TexturesUV</strong> is an auxillary datastructure for storing vertex uv and texture maps for meshes.</p>
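<p>As a rough sketch of the tensor layouts these constructors expect (the dummy shapes below are assumptions matching the DensePose data built above, not values taken from the notebook):</p>
<pre><code class="language-python">import torch
from pytorch3d.structures import Meshes
from pytorch3d.renderer import TexturesUV

# Dummy tensors with the layouts TexturesUV and Meshes expect.
tex_map   = torch.rand(1, 1024, 1024, 3)                  # (N, H, W, 3) texture atlas
faces_idx = torch.zeros(1, 13774, 3, dtype=torch.int64)   # (N, F, 3) indices into verts_uvs / verts
verts_uv  = torch.rand(1, 7829, 2)                        # (N, V, 2) UV coordinate per vertex
verts_xyz = torch.rand(1, 7829, 3)                        # (N, V, 3) vertex positions

texture = TexturesUV(maps=tex_map, faces_uvs=faces_idx, verts_uvs=verts_uv)
mesh = Meshes(verts=verts_xyz, faces=faces_idx, textures=texture)
</code></pre>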
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing code_cell rendered">
|
||||
<div class="input">
|
||||
<div class="prompt input_prompt">In [ ]:</div>
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="n">texture</span> <span class="o">=</span> <span class="n">TexturesUV</span><span class="p">(</span><span class="n">maps</span><span class="o">=</span><span class="n">tex</span><span class="p">,</span> <span class="n">faces_uvs</span><span class="o">=</span><span class="n">faces</span><span class="p">,</span> <span class="n">verts_uvs</span><span class="o">=</span><span class="n">verts_uv</span><span class="p">)</span>
|
||||
<span class="n">mesh</span> <span class="o">=</span> <span class="n">Meshes</span><span class="p">(</span><span class="n">v_template_extended</span><span class="p">,</span> <span class="n">faces</span><span class="p">,</span> <span class="n">texture</span><span class="p">)</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
|
||||
</div><div class="inner_cell">
|
||||
<div class="text_cell_render border-box-sizing rendered_html">
|
||||
<h2 id="Create-a-renderer">Create a renderer<a class="anchor-link" href="#Create-a-renderer">¶</a></h2>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing code_cell rendered">
|
||||
<div class="input">
|
||||
<div class="prompt input_prompt">In [ ]:</div>
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="c1"># Initialize a camera.</span>
|
||||
<span class="c1"># World coordinates +Y up, +X left and +Z in.</span>
|
||||
<span class="n">R</span><span class="p">,</span> <span class="n">T</span> <span class="o">=</span> <span class="n">look_at_view_transform</span><span class="p">(</span><span class="mf">2.7</span><span class="p">,</span> <span class="mi">0</span><span class="p">,</span> <span class="mi">0</span><span class="p">)</span>
|
||||
<span class="n">cameras</span> <span class="o">=</span> <span class="n">FoVPerspectiveCameras</span><span class="p">(</span><span class="n">device</span><span class="o">=</span><span class="n">device</span><span class="p">,</span> <span class="n">R</span><span class="o">=</span><span class="n">R</span><span class="p">,</span> <span class="n">T</span><span class="o">=</span><span class="n">T</span><span class="p">)</span>
|
||||
|
||||
<span class="c1"># Define the settings for rasterization and shading. Here we set the output image to be of size</span>
|
||||
<span class="c1"># 512x512. As we are rendering images for visualization purposes only we will set faces_per_pixel=1</span>
|
||||
<span class="c1"># and blur_radius=0.0. </span>
|
||||
<span class="n">raster_settings</span> <span class="o">=</span> <span class="n">RasterizationSettings</span><span class="p">(</span>
|
||||
<span class="n">image_size</span><span class="o">=</span><span class="mi">512</span><span class="p">,</span>
|
||||
<span class="n">blur_radius</span><span class="o">=</span><span class="mf">0.0</span><span class="p">,</span>
|
||||
<span class="n">faces_per_pixel</span><span class="o">=</span><span class="mi">1</span><span class="p">,</span>
|
||||
<span class="p">)</span>
|
||||
|
||||
<span class="c1"># Place a point light in front of the person. </span>
|
||||
<span class="n">lights</span> <span class="o">=</span> <span class="n">PointLights</span><span class="p">(</span><span class="n">device</span><span class="o">=</span><span class="n">device</span><span class="p">,</span> <span class="n">location</span><span class="o">=</span><span class="p">[[</span><span class="mf">0.0</span><span class="p">,</span> <span class="mf">0.0</span><span class="p">,</span> <span class="mf">2.0</span><span class="p">]])</span>
|
||||
|
||||
<span class="c1"># Create a phong renderer by composing a rasterizer and a shader. The textured phong shader will </span>
|
||||
<span class="c1"># interpolate the texture uv coordinates for each vertex, sample from a texture image and </span>
|
||||
<span class="c1"># apply the Phong lighting model</span>
|
||||
<span class="n">renderer</span> <span class="o">=</span> <span class="n">MeshRenderer</span><span class="p">(</span>
|
||||
<span class="n">rasterizer</span><span class="o">=</span><span class="n">MeshRasterizer</span><span class="p">(</span>
|
||||
<span class="n">cameras</span><span class="o">=</span><span class="n">cameras</span><span class="p">,</span>
|
||||
<span class="n">raster_settings</span><span class="o">=</span><span class="n">raster_settings</span>
|
||||
<span class="p">),</span>
|
||||
<span class="n">shader</span><span class="o">=</span><span class="n">SoftPhongShader</span><span class="p">(</span>
|
||||
<span class="n">device</span><span class="o">=</span><span class="n">device</span><span class="p">,</span>
|
||||
<span class="n">cameras</span><span class="o">=</span><span class="n">cameras</span><span class="p">,</span>
|
||||
<span class="n">lights</span><span class="o">=</span><span class="n">lights</span>
|
||||
<span class="p">)</span>
|
||||
<span class="p">)</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
|
||||
</div><div class="inner_cell">
|
||||
<div class="text_cell_render border-box-sizing rendered_html">
|
||||
<p>Render the textured mesh we created from the SMPL model and texture map.</p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing code_cell rendered">
|
||||
<div class="input">
|
||||
<div class="prompt input_prompt">In [ ]:</div>
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="n">images</span> <span class="o">=</span> <span class="n">renderer</span><span class="p">(</span><span class="n">mesh</span><span class="p">)</span>
|
||||
<span class="n">plt</span><span class="o">.</span><span class="n">figure</span><span class="p">(</span><span class="n">figsize</span><span class="o">=</span><span class="p">(</span><span class="mi">10</span><span class="p">,</span> <span class="mi">10</span><span class="p">))</span>
|
||||
<span class="n">plt</span><span class="o">.</span><span class="n">imshow</span><span class="p">(</span><span class="n">images</span><span class="p">[</span><span class="mi">0</span><span class="p">,</span> <span class="o">...</span><span class="p">,</span> <span class="p">:</span><span class="mi">3</span><span class="p">]</span><span class="o">.</span><span class="n">cpu</span><span class="p">()</span><span class="o">.</span><span class="n">numpy</span><span class="p">())</span>
|
||||
<span class="n">plt</span><span class="o">.</span><span class="n">grid</span><span class="p">(</span><span class="s2">"off"</span><span class="p">);</span>
|
||||
<span class="n">plt</span><span class="o">.</span><span class="n">axis</span><span class="p">(</span><span class="s2">"off"</span><span class="p">);</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
|
||||
</div><div class="inner_cell">
|
||||
<div class="text_cell_render border-box-sizing rendered_html">
|
||||
<h3 id="Different-view-and-lighting-of-the-body">Different view and lighting of the body<a class="anchor-link" href="#Different-view-and-lighting-of-the-body">¶</a></h3><p>We can also change many other settings in the rendering pipeline. Here we:</p>
|
||||
<ul>
|
||||
<li>change the <strong>viewing angle</strong> of the camera</li>
|
||||
<li>change the <strong>position</strong> of the point light</li>
|
||||
</ul>
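<p>These overrides are not limited to a single view. As a hedged sketch (reusing <code>mesh</code>, <code>renderer</code> and <code>device</code> from the cells above, with hypothetical names for the new tensors), the same renderer can also take a batch of cameras and render several azimuth angles in one call:</p>
<pre><code class="language-python"># Sketch: render the person from several azimuth angles at once.
batch_size = 4
azim = torch.linspace(-90, 90, batch_size)                   # viewing angles in degrees
R, T = look_at_view_transform(dist=2.7, elev=10, azim=azim)  # batched rotations / translations
batch_cameras = FoVPerspectiveCameras(device=device, R=R, T=T)

# Extend the single mesh so there is one copy per camera, then render.
batch_images = renderer(mesh.extend(batch_size), cameras=batch_cameras)
print(batch_images.shape)  # expected (4, 512, 512, 4): one RGBA image per view
</code></pre>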
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing code_cell rendered">
|
||||
<div class="input">
|
||||
<div class="prompt input_prompt">In [ ]:</div>
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="c1"># Rotate the person by increasing the elevation and azimuth angles to view the back of the person from above. </span>
|
||||
<span class="n">R</span><span class="p">,</span> <span class="n">T</span> <span class="o">=</span> <span class="n">look_at_view_transform</span><span class="p">(</span><span class="mf">2.7</span><span class="p">,</span> <span class="mi">10</span><span class="p">,</span> <span class="mi">180</span><span class="p">)</span>
|
||||
<span class="n">cameras</span> <span class="o">=</span> <span class="n">FoVPerspectiveCameras</span><span class="p">(</span><span class="n">device</span><span class="o">=</span><span class="n">device</span><span class="p">,</span> <span class="n">R</span><span class="o">=</span><span class="n">R</span><span class="p">,</span> <span class="n">T</span><span class="o">=</span><span class="n">T</span><span class="p">)</span>
|
||||
|
||||
<span class="c1"># Move the light location so the light is shining on the person's back. </span>
|
||||
<span class="n">lights</span><span class="o">.</span><span class="n">location</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">tensor</span><span class="p">([[</span><span class="mf">2.0</span><span class="p">,</span> <span class="mf">2.0</span><span class="p">,</span> <span class="o">-</span><span class="mf">2.0</span><span class="p">]],</span> <span class="n">device</span><span class="o">=</span><span class="n">device</span><span class="p">)</span>
|
||||
|
||||
<span class="c1"># Re render the mesh, passing in keyword arguments for the modified components.</span>
|
||||
<span class="n">images</span> <span class="o">=</span> <span class="n">renderer</span><span class="p">(</span><span class="n">mesh</span><span class="p">,</span> <span class="n">lights</span><span class="o">=</span><span class="n">lights</span><span class="p">,</span> <span class="n">cameras</span><span class="o">=</span><span class="n">cameras</span><span class="p">)</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing code_cell rendered">
|
||||
<div class="input">
|
||||
<div class="prompt input_prompt">In [ ]:</div>
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="n">plt</span><span class="o">.</span><span class="n">figure</span><span class="p">(</span><span class="n">figsize</span><span class="o">=</span><span class="p">(</span><span class="mi">10</span><span class="p">,</span> <span class="mi">10</span><span class="p">))</span>
|
||||
<span class="n">plt</span><span class="o">.</span><span class="n">imshow</span><span class="p">(</span><span class="n">images</span><span class="p">[</span><span class="mi">0</span><span class="p">,</span> <span class="o">...</span><span class="p">,</span> <span class="p">:</span><span class="mi">3</span><span class="p">]</span><span class="o">.</span><span class="n">cpu</span><span class="p">()</span><span class="o">.</span><span class="n">numpy</span><span class="p">())</span>
|
||||
<span class="n">plt</span><span class="o">.</span><span class="n">grid</span><span class="p">(</span><span class="s2">"off"</span><span class="p">);</span>
|
||||
<span class="n">plt</span><span class="o">.</span><span class="n">axis</span><span class="p">(</span><span class="s2">"off"</span><span class="p">);</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
|
||||
</div><div class="inner_cell">
|
||||
<div class="text_cell_render border-box-sizing rendered_html">
|
||||
<h2 id="Conclusion">Conclusion<a class="anchor-link" href="#Conclusion">¶</a></h2><p>In this tutorial, we've learned how to construct a <strong>textured mesh</strong> from <strong>DensePose model and uv data</strong>, as well as initialize a <strong>Renderer</strong> and change the viewing angle and lighting of our rendered mesh.</p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div></div></div></div></div><footer class="nav-footer" id="footer"><section class="sitemap"><div class="footerSection"><div class="social"><a class="github-button" href="https://github.com/facebookresearch/pytorch3d" data-count-href="https://github.com/facebookresearch/pytorch3d/stargazers" data-show-count="true" data-count-aria-label="# stargazers on GitHub" aria-label="Star PyTorch3D on GitHub">pytorch3d</a></div></div></section><a href="https://opensource.facebook.com/" target="_blank" rel="noreferrer noopener" class="fbOpenSource"><img src="/img/oss_logo.png" alt="Facebook Open Source" width="170" height="45"/></a><section class="copyright">Copyright © 2020 Facebook Inc<br/>Legal:<a href="https://opensource.facebook.com/legal/privacy/" target="_blank" rel="noreferrer noopener">Privacy</a><a href="https://opensource.facebook.com/legal/terms/" target="_blank" rel="noreferrer noopener">Terms</a></section></footer></div></body></html>
|
||||
491
tutorials/render_densepose/index.html
Normal file
@@ -0,0 +1,491 @@
|
||||
<!DOCTYPE html><html lang=""><head><meta charSet="utf-8"/><meta http-equiv="X-UA-Compatible" content="IE=edge"/><title>PyTorch3D · A library for deep learning with 3D data</title><meta name="viewport" content="width=device-width"/><meta name="generator" content="Docusaurus"/><meta name="description" content="A library for deep learning with 3D data"/><meta property="og:title" content="PyTorch3D · A library for deep learning with 3D data"/><meta property="og:type" content="website"/><meta property="og:url" content="https://pytorch3d.org/"/><meta property="og:description" content="A library for deep learning with 3D data"/><meta property="og:image" content="https://pytorch3d.org/img/pytorch3dlogoicon.svg"/><meta name="twitter:card" content="summary"/><meta name="twitter:image" content="https://pytorch3d.org/img/pytorch3dlogoicon.svg"/><link rel="shortcut icon" href="/img/pytorch3dfavicon.png"/><link rel="stylesheet" href="//cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/styles/default.min.css"/><script>
|
||||
(function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
|
||||
(i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
|
||||
m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
|
||||
})(window,document,'script','https://www.google-analytics.com/analytics.js','ga');
|
||||
|
||||
ga('create', 'UA-157376881-1', 'auto');
|
||||
ga('send', 'pageview');
|
||||
</script><script type="text/javascript" src="https://buttons.github.io/buttons.js"></script><script src="/js/scrollSpy.js"></script><link rel="stylesheet" href="/css/main.css"/><script src="/js/codetabs.js"></script></head><body><div class="fixedHeaderContainer"><div class="headerWrapper wrapper"><header><a href="/"><img class="logo" src="/img/pytorch3dfavicon.png" alt="PyTorch3D"/><h2 class="headerTitleWithLogo">PyTorch3D</h2></a><div class="navigationWrapper navigationSlider"><nav class="slidingNav"><ul class="nav-site nav-site-internal"><li class=""><a href="/docs/why_pytorch3d" target="_self">Docs</a></li><li class=""><a href="/tutorials" target="_self">Tutorials</a></li><li class=""><a href="https://pytorch3d.readthedocs.io/" target="_self">API</a></li><li class=""><a href="https://github.com/facebookresearch/pytorch3d" target="_self">GitHub</a></li></ul></nav></div></header></div></div><div class="navPusher"><div class="docMainWrapper wrapper"><div class="container docsNavContainer" id="docsNav"><nav class="toc"><div class="toggleNav"><section class="navWrapper wrapper"><div class="navBreadcrumb wrapper"><div class="navToggle" id="navToggler"><div class="hamburger-menu"><div class="line1"></div><div class="line2"></div><div class="line3"></div></div></div><h2><i>›</i><span></span></h2><div class="tocToggler" id="tocToggler"><i class="icon-toc"></i></div></div><div class="navGroups"><div class="navGroup"><h3 class="navGroupCategoryTitle">Tutorials</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/">Overview</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">3D operators</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/deform_source_mesh_to_target_mesh">Fit Mesh</a></li><li class="navListItem"><a class="navItem" href="/tutorials/bundle_adjustment">Bundle Adjustment</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Rendering</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/render_textured_meshes">Render Textured Meshes</a></li><li class="navListItem navListItemActive"><a class="navItem" href="/tutorials/render_densepose">Render DensePose Meshes</a></li><li class="navListItem"><a class="navItem" href="/tutorials/render_colored_points">Render Colored Pointclouds</a></li><li class="navListItem"><a class="navItem" href="/tutorials/fit_textured_mesh">Fit a Mesh with Texture via Rendering</a></li><li class="navListItem"><a class="navItem" href="/tutorials/camera_position_optimization_with_differentiable_rendering">Camera Position Optimization with Differentiable Rendering</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Dataloaders</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/dataloaders_ShapeNetCore_R2N2">Data loaders for ShapeNetCore and R2N2</a></li></ul></div></div></section></div><script>
|
||||
var coll = document.getElementsByClassName('collapsible');
|
||||
var checkActiveCategory = true;
|
||||
for (var i = 0; i < coll.length; i++) {
|
||||
var links = coll[i].nextElementSibling.getElementsByTagName('*');
|
||||
if (checkActiveCategory){
|
||||
for (var j = 0; j < links.length; j++) {
|
||||
if (links[j].classList.contains('navListItemActive')){
|
||||
coll[i].nextElementSibling.classList.toggle('hide');
|
||||
coll[i].childNodes[1].classList.toggle('rotate');
|
||||
checkActiveCategory = false;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
coll[i].addEventListener('click', function() {
|
||||
var arrow = this.childNodes[1];
|
||||
arrow.classList.toggle('rotate');
|
||||
var content = this.nextElementSibling;
|
||||
content.classList.toggle('hide');
|
||||
});
|
||||
}
|
||||
|
||||
document.addEventListener('DOMContentLoaded', function() {
|
||||
createToggler('#navToggler', '#docsNav', 'docsSliderActive');
|
||||
createToggler('#tocToggler', 'body', 'tocActive');
|
||||
|
||||
var headings = document.querySelector('.toc-headings');
|
||||
headings && headings.addEventListener('click', function(event) {
|
||||
var el = event.target;
|
||||
while(el !== headings){
|
||||
if (el.tagName === 'A') {
|
||||
document.body.classList.remove('tocActive');
|
||||
break;
|
||||
} else{
|
||||
el = el.parentNode;
|
||||
}
|
||||
}
|
||||
}, false);
|
||||
|
||||
function createToggler(togglerSelector, targetSelector, className) {
|
||||
var toggler = document.querySelector(togglerSelector);
|
||||
var target = document.querySelector(targetSelector);
|
||||
|
||||
if (!toggler) {
|
||||
return;
|
||||
}
|
||||
|
||||
toggler.onclick = function(event) {
|
||||
event.preventDefault();
|
||||
|
||||
target.classList.toggle(className);
|
||||
};
|
||||
}
|
||||
});
|
||||
</script></nav></div><div class="container mainContainer"><div class="wrapper"><div class="tutorialButtonsWrapper"><div class="tutorialButtonWrapper buttonWrapper"><a class="tutorialButton button" download="" href="https://colab.research.google.com/github/facebookresearch/pytorch3d/blob/stable/docs/tutorials/render_densepose.ipynb" target="_blank"><img class="colabButton" align="left" src="/img/colab_icon.png"/>Run in Google Colab</a></div><div class="tutorialButtonWrapper buttonWrapper"><a class="tutorialButton button" download="" href="/files/render_densepose.ipynb" target="_blank"><svg aria-hidden="true" focusable="false" data-prefix="fas" data-icon="file-download" class="svg-inline--fa fa-file-download fa-w-12" role="img" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 384 512"><path fill="currentColor" d="M224 136V0H24C10.7 0 0 10.7 0 24v464c0 13.3 10.7 24 24 24h336c13.3 0 24-10.7 24-24V160H248c-13.2 0-24-10.8-24-24zm76.45 211.36l-96.42 95.7c-6.65 6.61-17.39 6.61-24.04 0l-96.42-95.7C73.42 337.29 80.54 320 94.82 320H160v-80c0-8.84 7.16-16 16-16h32c8.84 0 16 7.16 16 16v80h65.18c14.28 0 21.4 17.29 11.27 27.36zM377 105L279.1 7c-4.5-4.5-10.6-7-17-7H256v128h128v-6.1c0-6.3-2.5-12.4-7-16.9z"></path></svg>Download Tutorial Jupyter Notebook</a></div><div class="tutorialButtonWrapper buttonWrapper"><a class="tutorialButton button" download="" href="/files/render_densepose.py" target="_blank"><svg aria-hidden="true" focusable="false" data-prefix="fas" data-icon="file-download" class="svg-inline--fa fa-file-download fa-w-12" role="img" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 384 512"><path fill="currentColor" d="M224 136V0H24C10.7 0 0 10.7 0 24v464c0 13.3 10.7 24 24 24h336c13.3 0 24-10.7 24-24V160H248c-13.2 0-24-10.8-24-24zm76.45 211.36l-96.42 95.7c-6.65 6.61-17.39 6.61-24.04 0l-96.42-95.7C73.42 337.29 80.54 320 94.82 320H160v-80c0-8.84 7.16-16 16-16h32c8.84 0 16 7.16 16 16v80h65.18c14.28 0 21.4 17.29 11.27 27.36zM377 105L279.1 7c-4.5-4.5-10.6-7-17-7H256v128h128v-6.1c0-6.3-2.5-12.4-7-16.9z"></path></svg>Download Tutorial Source Code</a></div></div><div class="tutorialBody">
|
||||
<script
|
||||
src="https://cdnjs.cloudflare.com/ajax/libs/require.js/2.1.10/require.min.js">
|
||||
</script>
|
||||
<script
|
||||
src="https://cdnjs.cloudflare.com/ajax/libs/jquery/2.0.3/jquery.min.js">
|
||||
</script>
|
||||
<div class="notebook">
|
||||
<div class="cell border-box-sizing code_cell rendered">
|
||||
<div class="input">
|
||||
<div class="prompt input_prompt">In [ ]:</div>
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="c1"># Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
|
||||
</div><div class="inner_cell">
|
||||
<div class="text_cell_render border-box-sizing rendered_html">
|
||||
<h1 id="Render-DensePose">Render DensePose<a class="anchor-link" href="#Render-DensePose">¶</a></h1><p>DensePose refers to dense human pose representation: <a href="https://github.com/facebookresearch/DensePose">https://github.com/facebookresearch/DensePose</a>.
|
||||
In this tutorial, we provide an example of using DensePose data in PyTorch3D.</p>
|
||||
<p>This tutorial shows how to:</p>
|
||||
<ul>
|
||||
<li>load a mesh and textures from DensePose <code>.mat</code> and <code>.pkl</code> files</li>
|
||||
<li>set up a renderer </li>
|
||||
<li>render the mesh </li>
|
||||
<li>vary the rendering settings such as lighting and camera position</li>
|
||||
</ul>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
|
||||
</div><div class="inner_cell">
|
||||
<div class="text_cell_render border-box-sizing rendered_html">
|
||||
<h2 id="Import-modules">Import modules<a class="anchor-link" href="#Import-modules">¶</a></h2>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
|
||||
</div><div class="inner_cell">
|
||||
<div class="text_cell_render border-box-sizing rendered_html">
|
||||
<p>If torch, torchvision and PyTorch3D are not installed, run the following cell:</p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing code_cell rendered">
|
||||
<div class="input">
|
||||
<div class="prompt input_prompt">In [ ]:</div>
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="o">!</span>pip install torch torchvision
|
||||
<span class="kn">import</span> <span class="nn">os</span>
|
||||
<span class="kn">import</span> <span class="nn">sys</span>
|
||||
<span class="kn">import</span> <span class="nn">torch</span>
|
||||
<span class="k">if</span> <span class="n">torch</span><span class="o">.</span><span class="n">__version__</span><span class="o">==</span><span class="s1">'1.6.0+cu101'</span> <span class="ow">and</span> <span class="n">sys</span><span class="o">.</span><span class="n">platform</span><span class="o">.</span><span class="n">startswith</span><span class="p">(</span><span class="s1">'linux'</span><span class="p">):</span>
|
||||
<span class="o">!</span>pip install pytorch3d
|
||||
<span class="k">else</span><span class="p">:</span>
|
||||
<span class="n">need_pytorch3d</span><span class="o">=</span><span class="kc">False</span>
|
||||
<span class="k">try</span><span class="p">:</span>
|
||||
<span class="kn">import</span> <span class="nn">pytorch3d</span>
|
||||
<span class="k">except</span> <span class="n">ModuleNotFoundError</span><span class="p">:</span>
|
||||
<span class="n">need_pytorch3d</span><span class="o">=</span><span class="kc">True</span>
|
||||
<span class="k">if</span> <span class="n">need_pytorch3d</span><span class="p">:</span>
|
||||
<span class="o">!</span>curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz
|
||||
<span class="o">!</span>tar xzf <span class="m">1</span>.10.0.tar.gz
|
||||
<span class="n">os</span><span class="o">.</span><span class="n">environ</span><span class="p">[</span><span class="s2">"CUB_HOME"</span><span class="p">]</span> <span class="o">=</span> <span class="n">os</span><span class="o">.</span><span class="n">getcwd</span><span class="p">()</span> <span class="o">+</span> <span class="s2">"/cub-1.10.0"</span>
|
||||
<span class="o">!</span>pip install <span class="s1">'git+https://github.com/facebookresearch/pytorch3d.git@stable'</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing code_cell rendered">
|
||||
<div class="input">
|
||||
<div class="prompt input_prompt">In [ ]:</div>
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="c1"># We also install chumpy as it is needed to load the SMPL model pickle file.</span>
|
||||
<span class="o">!</span>pip install chumpy
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing code_cell rendered">
|
||||
<div class="input">
|
||||
<div class="prompt input_prompt">In [ ]:</div>
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="kn">import</span> <span class="nn">os</span>
|
||||
<span class="kn">import</span> <span class="nn">torch</span>
|
||||
<span class="kn">import</span> <span class="nn">matplotlib.pyplot</span> <span class="k">as</span> <span class="nn">plt</span>
|
||||
<span class="kn">from</span> <span class="nn">skimage.io</span> <span class="k">import</span> <span class="n">imread</span>
|
||||
<span class="kn">import</span> <span class="nn">numpy</span> <span class="k">as</span> <span class="nn">np</span>
|
||||
|
||||
<span class="c1"># libraries for reading data from files</span>
|
||||
<span class="kn">from</span> <span class="nn">scipy.io</span> <span class="k">import</span> <span class="n">loadmat</span>
|
||||
<span class="kn">from</span> <span class="nn">pytorch3d.io.utils</span> <span class="k">import</span> <span class="n">_read_image</span>
|
||||
<span class="kn">import</span> <span class="nn">pickle</span>
|
||||
|
||||
<span class="c1"># Data structures and functions for rendering</span>
|
||||
<span class="kn">from</span> <span class="nn">pytorch3d.structures</span> <span class="k">import</span> <span class="n">Meshes</span>
|
||||
<span class="kn">from</span> <span class="nn">pytorch3d.renderer</span> <span class="k">import</span> <span class="p">(</span>
|
||||
<span class="n">look_at_view_transform</span><span class="p">,</span>
|
||||
<span class="n">FoVPerspectiveCameras</span><span class="p">,</span>
|
||||
<span class="n">PointLights</span><span class="p">,</span>
|
||||
<span class="n">DirectionalLights</span><span class="p">,</span>
|
||||
<span class="n">Materials</span><span class="p">,</span>
|
||||
<span class="n">RasterizationSettings</span><span class="p">,</span>
|
||||
<span class="n">MeshRenderer</span><span class="p">,</span>
|
||||
<span class="n">MeshRasterizer</span><span class="p">,</span>
|
||||
<span class="n">SoftPhongShader</span><span class="p">,</span>
|
||||
<span class="n">TexturesUV</span>
|
||||
<span class="p">)</span>
|
||||
|
||||
<span class="c1"># add path for demo utils functions </span>
|
||||
<span class="kn">import</span> <span class="nn">sys</span>
|
||||
<span class="kn">import</span> <span class="nn">os</span>
|
||||
<span class="n">sys</span><span class="o">.</span><span class="n">path</span><span class="o">.</span><span class="n">append</span><span class="p">(</span><span class="n">os</span><span class="o">.</span><span class="n">path</span><span class="o">.</span><span class="n">abspath</span><span class="p">(</span><span class="s1">''</span><span class="p">))</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
|
||||
</div><div class="inner_cell">
|
||||
<div class="text_cell_render border-box-sizing rendered_html">
|
||||
<h2 id="Load-the-SMPL-model">Load the SMPL model<a class="anchor-link" href="#Load-the-SMPL-model">¶</a></h2><h4 id="Download-the-SMPL-model">Download the SMPL model<a class="anchor-link" href="#Download-the-SMPL-model">¶</a></h4><ul>
|
||||
<li>Go to <a href="http://smpl.is.tue.mpg.de/downloads">http://smpl.is.tue.mpg.de/downloads</a> and sign up.</li>
|
||||
<li>Download SMPL for Python Users and unzip.</li>
|
||||
<li>Copy the male template file <strong>'models/basicModel_m_lbs_10_207_0_v1.0.0.pkl'</strong> to the data/DensePose/ folder.<ul>
|
||||
<li>rename the file to <strong>'smpl_model.pkl'</strong>, or update the filename string where it is referenced in the code below</li>
|
||||
</ul>
|
||||
</li>
|
||||
</ul>
|
||||
<p>If running this notebook using Google Colab, run the following cell to fetch the texture and UV values and save them at the correct path.</p>
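<p>As an optional sanity check (a hypothetical snippet, not part of the original notebook), you can verify that the renamed SMPL pickle is where the later cells expect it before continuing:</p>
<pre><code class="language-python">import os

# Hypothetical check: the loading cell further below expects the renamed model here.
smpl_path = os.path.join("data", "DensePose", "smpl_model.pkl")
if not os.path.isfile(smpl_path):
    raise FileNotFoundError(
        f"Expected the SMPL model at {smpl_path}; download it from "
        "smpl.is.tue.mpg.de, rename it to smpl_model.pkl and place it there."
    )
</code></pre>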
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing code_cell rendered">
|
||||
<div class="input">
|
||||
<div class="prompt input_prompt">In [ ]:</div>
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="c1"># Texture image</span>
|
||||
<span class="o">!</span>wget -P data/DensePose https://raw.githubusercontent.com/facebookresearch/DensePose/master/DensePoseData/demo_data/texture_from_SURREAL.png
|
||||
|
||||
<span class="c1"># UV_processed.mat</span>
|
||||
<span class="o">!</span>wget https://dl.fbaipublicfiles.com/densepose/densepose_uv_data.tar.gz
|
||||
<span class="o">!</span>tar xvf densepose_uv_data.tar.gz -C data/DensePose
|
||||
<span class="o">!</span>rm densepose_uv_data.tar.gz
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
|
||||
</div><div class="inner_cell">
|
||||
<div class="text_cell_render border-box-sizing rendered_html">
|
||||
<p>Load our texture UV data and our SMPL data, with some processing to correct data values and format.</p>
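<p>If you want to peek at what the <code>.mat</code> file contains before using it, a small inspection sketch (assuming the UV data has already been downloaded to <code>data/DensePose/UV_Processed.mat</code>) could look like this:</p>
<pre><code class="language-python">from scipy.io import loadmat

# Hypothetical inspection of the DensePose UV data: print each array and its shape.
ALP_UV = loadmat("data/DensePose/UV_Processed.mat")
for key, value in ALP_UV.items():
    if not key.startswith("__"):  # skip .mat metadata entries
        print(key, getattr(value, "shape", type(value)))
</code></pre>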
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing code_cell rendered">
|
||||
<div class="input">
|
||||
<div class="prompt input_prompt">In [ ]:</div>
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="c1"># Setup</span>
|
||||
<span class="k">if</span> <span class="n">torch</span><span class="o">.</span><span class="n">cuda</span><span class="o">.</span><span class="n">is_available</span><span class="p">():</span>
|
||||
<span class="n">device</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">device</span><span class="p">(</span><span class="s2">"cuda:0"</span><span class="p">)</span>
|
||||
<span class="n">torch</span><span class="o">.</span><span class="n">cuda</span><span class="o">.</span><span class="n">set_device</span><span class="p">(</span><span class="n">device</span><span class="p">)</span>
|
||||
<span class="k">else</span><span class="p">:</span>
|
||||
<span class="n">device</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">device</span><span class="p">(</span><span class="s2">"cpu"</span><span class="p">)</span>
|
||||
|
||||
<span class="c1"># Set paths</span>
|
||||
<span class="n">DATA_DIR</span> <span class="o">=</span> <span class="s2">"./data"</span>
|
||||
<span class="n">data_filename</span> <span class="o">=</span> <span class="n">os</span><span class="o">.</span><span class="n">path</span><span class="o">.</span><span class="n">join</span><span class="p">(</span><span class="n">DATA_DIR</span><span class="p">,</span> <span class="s2">"DensePose/UV_Processed.mat"</span><span class="p">)</span>
|
||||
<span class="n">tex_filename</span> <span class="o">=</span> <span class="n">os</span><span class="o">.</span><span class="n">path</span><span class="o">.</span><span class="n">join</span><span class="p">(</span><span class="n">DATA_DIR</span><span class="p">,</span><span class="s2">"DensePose/texture_from_SURREAL.png"</span><span class="p">)</span>
|
||||
<span class="c1"># rename your .pkl file or change this string</span>
|
||||
<span class="n">verts_filename</span> <span class="o">=</span> <span class="n">os</span><span class="o">.</span><span class="n">path</span><span class="o">.</span><span class="n">join</span><span class="p">(</span><span class="n">DATA_DIR</span><span class="p">,</span> <span class="s2">"DensePose/smpl_model.pkl"</span><span class="p">)</span>
|
||||
|
||||
|
||||
<span class="c1"># Load SMPL and texture data</span>
|
||||
<span class="k">with</span> <span class="nb">open</span><span class="p">(</span><span class="n">verts_filename</span><span class="p">,</span> <span class="s1">'rb'</span><span class="p">)</span> <span class="k">as</span> <span class="n">f</span><span class="p">:</span>
|
||||
<span class="n">data</span> <span class="o">=</span> <span class="n">pickle</span><span class="o">.</span><span class="n">load</span><span class="p">(</span><span class="n">f</span><span class="p">,</span> <span class="n">encoding</span><span class="o">=</span><span class="s1">'latin1'</span><span class="p">)</span>
|
||||
<span class="n">v_template</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">Tensor</span><span class="p">(</span><span class="n">data</span><span class="p">[</span><span class="s1">'v_template'</span><span class="p">])</span><span class="o">.</span><span class="n">to</span><span class="p">(</span><span class="n">device</span><span class="p">)</span> <span class="c1"># (6890, 3)</span>
|
||||
<span class="n">ALP_UV</span> <span class="o">=</span> <span class="n">loadmat</span><span class="p">(</span><span class="n">data_filename</span><span class="p">)</span>
|
||||
<span class="n">tex</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">from_numpy</span><span class="p">(</span><span class="n">_read_image</span><span class="p">(</span><span class="n">file_name</span><span class="o">=</span><span class="n">tex_filename</span><span class="p">,</span> <span class="nb">format</span><span class="o">=</span><span class="s1">'RGB'</span><span class="p">)</span> <span class="o">/</span> <span class="mf">255.</span> <span class="p">)</span><span class="o">.</span><span class="n">unsqueeze</span><span class="p">(</span><span class="mi">0</span><span class="p">)</span><span class="o">.</span><span class="n">to</span><span class="p">(</span><span class="n">device</span><span class="p">)</span>
|
||||
|
||||
<span class="n">verts</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">from_numpy</span><span class="p">((</span><span class="n">ALP_UV</span><span class="p">[</span><span class="s2">"All_vertices"</span><span class="p">])</span><span class="o">.</span><span class="n">astype</span><span class="p">(</span><span class="nb">int</span><span class="p">))</span><span class="o">.</span><span class="n">squeeze</span><span class="p">()</span><span class="o">.</span><span class="n">to</span><span class="p">(</span><span class="n">device</span><span class="p">)</span> <span class="c1"># (7829, 1)</span>
|
||||
<span class="n">U</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">Tensor</span><span class="p">(</span><span class="n">ALP_UV</span><span class="p">[</span><span class="s1">'All_U_norm'</span><span class="p">])</span><span class="o">.</span><span class="n">to</span><span class="p">(</span><span class="n">device</span><span class="p">)</span> <span class="c1"># (7829, 1)</span>
|
||||
<span class="n">V</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">Tensor</span><span class="p">(</span><span class="n">ALP_UV</span><span class="p">[</span><span class="s1">'All_V_norm'</span><span class="p">])</span><span class="o">.</span><span class="n">to</span><span class="p">(</span><span class="n">device</span><span class="p">)</span> <span class="c1"># (7829, 1)</span>
|
||||
<span class="n">faces</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">from_numpy</span><span class="p">((</span><span class="n">ALP_UV</span><span class="p">[</span><span class="s1">'All_Faces'</span><span class="p">]</span> <span class="o">-</span> <span class="mi">1</span><span class="p">)</span><span class="o">.</span><span class="n">astype</span><span class="p">(</span><span class="nb">int</span><span class="p">))</span><span class="o">.</span><span class="n">to</span><span class="p">(</span><span class="n">device</span><span class="p">)</span> <span class="c1"># (13774, 3)</span>
|
||||
<span class="n">face_indices</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">Tensor</span><span class="p">(</span><span class="n">ALP_UV</span><span class="p">[</span><span class="s1">'All_FaceIndices'</span><span class="p">])</span><span class="o">.</span><span class="n">squeeze</span><span class="p">()</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing code_cell rendered">
|
||||
<div class="input">
|
||||
<div class="prompt input_prompt">In [ ]:</div>
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="c1"># Display the texture image</span>
|
||||
<span class="n">plt</span><span class="o">.</span><span class="n">figure</span><span class="p">(</span><span class="n">figsize</span><span class="o">=</span><span class="p">(</span><span class="mi">10</span><span class="p">,</span> <span class="mi">10</span><span class="p">))</span>
|
||||
<span class="n">plt</span><span class="o">.</span><span class="n">imshow</span><span class="p">(</span><span class="n">tex</span><span class="o">.</span><span class="n">squeeze</span><span class="p">(</span><span class="mi">0</span><span class="p">)</span><span class="o">.</span><span class="n">cpu</span><span class="p">())</span>
|
||||
<span class="n">plt</span><span class="o">.</span><span class="n">grid</span><span class="p">(</span><span class="s2">"off"</span><span class="p">);</span>
|
||||
<span class="n">plt</span><span class="o">.</span><span class="n">axis</span><span class="p">(</span><span class="s2">"off"</span><span class="p">);</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
|
||||
</div><div class="inner_cell">
|
||||
<div class="text_cell_render border-box-sizing rendered_html">
|
||||
<p>In DensePose, the body mesh is split into 24 parts. In the texture image, the 24 parts are laid out as separate (200, 200) images, one per body part. The convention in DensePose is that each face in the mesh is associated with a body part (given by the face_indices tensor above). The vertex UV values (in the range [0, 1]) for each face are relative to the (200, 200) texture map of the body part that the face belongs to, so they cannot be used directly with the full texture image. We therefore have to offset the vertex UV values according to the body part of the associated face.</p>
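<p>A compact, hypothetical helper (not part of the notebook) makes this convention explicit: the 1-indexed part id selects a tile of the atlas, and the per-part UV is rescaled and shifted into it.</p>
<pre><code class="language-python">def part_uv_to_atlas_uv(u, v, part, cols=4, rows=6):
    # Hypothetical helper: `part` is 1-indexed, as in face_indices.
    i, j = divmod(int(part) - 1, rows)  # which tile of the atlas
    return u / cols + i / cols, (1 - v) / rows + j / rows
</code></pre>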
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing code_cell rendered">
|
||||
<div class="input">
|
||||
<div class="prompt input_prompt">In [ ]:</div>
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="c1"># Map each face to a (u, v) offset</span>
|
||||
<span class="n">offset_per_part</span> <span class="o">=</span> <span class="p">{}</span>
|
||||
<span class="n">already_offset</span> <span class="o">=</span> <span class="nb">set</span><span class="p">()</span>
|
||||
<span class="n">cols</span><span class="p">,</span> <span class="n">rows</span> <span class="o">=</span> <span class="mi">4</span><span class="p">,</span> <span class="mi">6</span>
|
||||
<span class="k">for</span> <span class="n">i</span><span class="p">,</span> <span class="n">u</span> <span class="ow">in</span> <span class="nb">enumerate</span><span class="p">(</span><span class="n">np</span><span class="o">.</span><span class="n">linspace</span><span class="p">(</span><span class="mi">0</span><span class="p">,</span> <span class="mi">1</span><span class="p">,</span> <span class="n">cols</span><span class="p">,</span> <span class="n">endpoint</span><span class="o">=</span><span class="kc">False</span><span class="p">)):</span>
|
||||
<span class="k">for</span> <span class="n">j</span><span class="p">,</span> <span class="n">v</span> <span class="ow">in</span> <span class="nb">enumerate</span><span class="p">(</span><span class="n">np</span><span class="o">.</span><span class="n">linspace</span><span class="p">(</span><span class="mi">0</span><span class="p">,</span> <span class="mi">1</span><span class="p">,</span> <span class="n">rows</span><span class="p">,</span> <span class="n">endpoint</span><span class="o">=</span><span class="kc">False</span><span class="p">)):</span>
|
||||
<span class="n">part</span> <span class="o">=</span> <span class="n">rows</span> <span class="o">*</span> <span class="n">i</span> <span class="o">+</span> <span class="n">j</span> <span class="o">+</span> <span class="mi">1</span> <span class="c1"># parts are 1-indexed in face_indices</span>
|
||||
<span class="n">offset_per_part</span><span class="p">[</span><span class="n">part</span><span class="p">]</span> <span class="o">=</span> <span class="p">(</span><span class="n">u</span><span class="p">,</span> <span class="n">v</span><span class="p">)</span>
|
||||
|
||||
<span class="c1"># iterate over faces and offset the corresponding vertex u and v values</span>
|
||||
<span class="k">for</span> <span class="n">i</span> <span class="ow">in</span> <span class="nb">range</span><span class="p">(</span><span class="nb">len</span><span class="p">(</span><span class="n">faces</span><span class="p">)):</span>
|
||||
<span class="n">face_vert_idxs</span> <span class="o">=</span> <span class="n">faces</span><span class="p">[</span><span class="n">i</span><span class="p">]</span>
|
||||
<span class="n">part</span> <span class="o">=</span> <span class="n">face_indices</span><span class="p">[</span><span class="n">i</span><span class="p">]</span>
|
||||
<span class="n">offset_u</span><span class="p">,</span> <span class="n">offset_v</span> <span class="o">=</span> <span class="n">offset_per_part</span><span class="p">[</span><span class="nb">int</span><span class="p">(</span><span class="n">part</span><span class="o">.</span><span class="n">item</span><span class="p">())]</span>
|
||||
|
||||
<span class="k">for</span> <span class="n">vert_idx</span> <span class="ow">in</span> <span class="n">face_vert_idxs</span><span class="p">:</span>
|
||||
<span class="c1"># vertices are reused, but we don't want to offset multiple times</span>
|
||||
<span class="k">if</span> <span class="n">vert_idx</span><span class="o">.</span><span class="n">item</span><span class="p">()</span> <span class="ow">not</span> <span class="ow">in</span> <span class="n">already_offset</span><span class="p">:</span>
|
||||
<span class="c1"># offset u value</span>
|
||||
<span class="n">U</span><span class="p">[</span><span class="n">vert_idx</span><span class="p">]</span> <span class="o">=</span> <span class="n">U</span><span class="p">[</span><span class="n">vert_idx</span><span class="p">]</span> <span class="o">/</span> <span class="n">cols</span> <span class="o">+</span> <span class="n">offset_u</span>
|
||||
<span class="c1"># offset v value</span>
|
||||
<span class="c1"># this also flips each part locally, as each part is upside down</span>
|
||||
<span class="n">V</span><span class="p">[</span><span class="n">vert_idx</span><span class="p">]</span> <span class="o">=</span> <span class="p">(</span><span class="mi">1</span> <span class="o">-</span> <span class="n">V</span><span class="p">[</span><span class="n">vert_idx</span><span class="p">])</span> <span class="o">/</span> <span class="n">rows</span> <span class="o">+</span> <span class="n">offset_v</span>
|
||||
<span class="c1"># add vertex to our set tracking offsetted vertices</span>
|
||||
<span class="n">already_offset</span><span class="o">.</span><span class="n">add</span><span class="p">(</span><span class="n">vert_idx</span><span class="o">.</span><span class="n">item</span><span class="p">())</span>
|
||||
|
||||
<span class="c1"># invert V values</span>
|
||||
<span class="n">U_norm</span><span class="p">,</span> <span class="n">V_norm</span> <span class="o">=</span> <span class="n">U</span><span class="p">,</span> <span class="mi">1</span> <span class="o">-</span> <span class="n">V</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
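<div class="cell border-box-sizing text_cell rendered"><div class="inner_cell"><div class="text_cell_render border-box-sizing rendered_html">
<p>As a small aside (not part of the original notebook), the loop above boils down to one per-vertex formula: each part's local <code>(u, v)</code> coordinate is shrunk into that part's tile of the <code>rows x cols</code> texture atlas, with <code>v</code> flipped because each part is stored upside down. A minimal sketch reusing the names from the cell above:</p>
<pre><code># sketch: the mapping applied to each vertex in the loop above
def to_atlas_uv(u, v, offset_u, offset_v, rows, cols):
    # (u, v) are the per-part coordinates; (offset_u, offset_v) locate the part's tile
    return u / cols + offset_u, (1 - v) / rows + offset_v
</code></pre>
</div></div></div>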
|
||||
<div class="cell border-box-sizing code_cell rendered">
|
||||
<div class="input">
|
||||
<div class="prompt input_prompt">In [ ]:</div>
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="c1"># create our verts_uv values</span>
|
||||
<span class="n">verts_uv</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">cat</span><span class="p">([</span><span class="n">U_norm</span><span class="p">[</span><span class="kc">None</span><span class="p">],</span><span class="n">V_norm</span><span class="p">[</span><span class="kc">None</span><span class="p">]],</span> <span class="n">dim</span><span class="o">=</span><span class="mi">2</span><span class="p">)</span> <span class="c1"># (1, 7829, 2)</span>
|
||||
|
||||
<span class="c1"># There are 6890 xyz vertex coordinates but 7829 vertex uv coordinates. </span>
|
||||
<span class="c1"># This is because the same vertex can be shared by multiple faces where each face may correspond to a different body part. </span>
|
||||
<span class="c1"># Therefore when initializing the Meshes class,</span>
|
||||
<span class="c1"># we need to map each of the vertices referenced by the DensePose faces (in verts, which is the "All_vertices" field)</span>
|
||||
<span class="c1"># to the correct xyz coordinate in the SMPL template mesh.</span>
|
||||
<span class="n">v_template_extended</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">stack</span><span class="p">(</span><span class="nb">list</span><span class="p">(</span><span class="nb">map</span><span class="p">(</span><span class="k">lambda</span> <span class="n">vert</span><span class="p">:</span> <span class="n">v_template</span><span class="p">[</span><span class="n">vert</span><span class="o">-</span><span class="mi">1</span><span class="p">],</span> <span class="n">verts</span><span class="p">)))</span><span class="o">.</span><span class="n">unsqueeze</span><span class="p">(</span><span class="mi">0</span><span class="p">)</span><span class="o">.</span><span class="n">to</span><span class="p">(</span><span class="n">device</span><span class="p">)</span> <span class="c1"># (1, 7829, 3)</span>
|
||||
|
||||
<span class="c1"># add a batch dimension to faces</span>
|
||||
<span class="n">faces</span> <span class="o">=</span> <span class="n">faces</span><span class="o">.</span><span class="n">unsqueeze</span><span class="p">(</span><span class="mi">0</span><span class="p">)</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
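<div class="cell border-box-sizing text_cell rendered"><div class="inner_cell"><div class="text_cell_render border-box-sizing rendered_html">
<p>The <code>torch.stack(list(map(...)))</code> call above builds the mapping one vertex at a time. Assuming <code>verts</code> is a 1-D LongTensor of 1-indexed vertex ids and <code>v_template</code> is the (6890, 3) SMPL template, the same result can be obtained with plain tensor indexing (a sketch, not part of the original notebook):</p>
<pre><code># sketch: vectorised equivalent of the map/stack above
v_template_extended = v_template[(verts - 1).long()].unsqueeze(0).to(device)  # (1, 7829, 3)
</code></pre>
</div></div></div>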
|
||||
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
|
||||
</div><div class="inner_cell">
|
||||
<div class="text_cell_render border-box-sizing rendered_html">
|
||||
<h3 id="Create-our-textured-mesh">Create our textured mesh<a class="anchor-link" href="#Create-our-textured-mesh">¶</a></h3><p><strong>Meshes</strong> is a unique datastructure provided in PyTorch3D for working with batches of meshes of different sizes.</p>
|
||||
<p><strong>TexturesUV</strong> is an auxiliary datastructure for storing vertex UV coordinates and texture maps for meshes.</p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
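<div class="cell border-box-sizing text_cell rendered"><div class="inner_cell"><div class="text_cell_render border-box-sizing rendered_html">
<p>Before building the textured mesh, it can help to confirm that the three inputs have the batched shapes TexturesUV expects: <code>maps</code> of shape (N, H, W, 3), <code>faces_uvs</code> of shape (N, F, 3) and <code>verts_uvs</code> of shape (N, V, 2). A minimal sanity-check sketch, assuming <code>tex</code> is the texture image tensor loaded earlier:</p>
<pre><code># sketch: sanity-check the inputs to TexturesUV
assert tex.dim() == 4 and tex.shape[-1] == 3             # (N, H, W, 3)
assert faces.dim() == 3 and faces.shape[-1] == 3         # (N, F, 3)
assert verts_uv.dim() == 3 and verts_uv.shape[-1] == 2   # (N, V, 2)
</code></pre>
</div></div></div>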
|
||||
<div class="cell border-box-sizing code_cell rendered">
|
||||
<div class="input">
|
||||
<div class="prompt input_prompt">In [ ]:</div>
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="n">texture</span> <span class="o">=</span> <span class="n">TexturesUV</span><span class="p">(</span><span class="n">maps</span><span class="o">=</span><span class="n">tex</span><span class="p">,</span> <span class="n">faces_uvs</span><span class="o">=</span><span class="n">faces</span><span class="p">,</span> <span class="n">verts_uvs</span><span class="o">=</span><span class="n">verts_uv</span><span class="p">)</span>
|
||||
<span class="n">mesh</span> <span class="o">=</span> <span class="n">Meshes</span><span class="p">(</span><span class="n">v_template_extended</span><span class="p">,</span> <span class="n">faces</span><span class="p">,</span> <span class="n">texture</span><span class="p">)</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
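<div class="cell border-box-sizing text_cell rendered"><div class="inner_cell"><div class="text_cell_render border-box-sizing rendered_html">
<p>(Optional) a quick way to confirm the mesh was assembled as intended is to query it directly; for example, it should report 7829 vertices per mesh and a single texture map. A sketch:</p>
<pre><code># sketch: inspect the textured mesh
print(mesh.num_verts_per_mesh())           # tensor([7829])
print(mesh.num_faces_per_mesh())
print(mesh.textures.maps_padded().shape)   # (1, H, W, 3)
</code></pre>
</div></div></div>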
|
||||
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
|
||||
</div><div class="inner_cell">
|
||||
<div class="text_cell_render border-box-sizing rendered_html">
|
||||
<h2 id="Create-a-renderer">Create a renderer<a class="anchor-link" href="#Create-a-renderer">¶</a></h2>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing code_cell rendered">
|
||||
<div class="input">
|
||||
<div class="prompt input_prompt">In [ ]:</div>
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="c1"># Initialize a camera.</span>
|
||||
<span class="c1"># World coordinates +Y up, +X left and +Z in.</span>
|
||||
<span class="n">R</span><span class="p">,</span> <span class="n">T</span> <span class="o">=</span> <span class="n">look_at_view_transform</span><span class="p">(</span><span class="mf">2.7</span><span class="p">,</span> <span class="mi">0</span><span class="p">,</span> <span class="mi">0</span><span class="p">)</span>
|
||||
<span class="n">cameras</span> <span class="o">=</span> <span class="n">FoVPerspectiveCameras</span><span class="p">(</span><span class="n">device</span><span class="o">=</span><span class="n">device</span><span class="p">,</span> <span class="n">R</span><span class="o">=</span><span class="n">R</span><span class="p">,</span> <span class="n">T</span><span class="o">=</span><span class="n">T</span><span class="p">)</span>
|
||||
|
||||
<span class="c1"># Define the settings for rasterization and shading. Here we set the output image to be of size</span>
|
||||
<span class="c1"># 512x512. As we are rendering images for visualization purposes only we will set faces_per_pixel=1</span>
|
||||
<span class="c1"># and blur_radius=0.0. </span>
|
||||
<span class="n">raster_settings</span> <span class="o">=</span> <span class="n">RasterizationSettings</span><span class="p">(</span>
|
||||
<span class="n">image_size</span><span class="o">=</span><span class="mi">512</span><span class="p">,</span>
|
||||
<span class="n">blur_radius</span><span class="o">=</span><span class="mf">0.0</span><span class="p">,</span>
|
||||
<span class="n">faces_per_pixel</span><span class="o">=</span><span class="mi">1</span><span class="p">,</span>
|
||||
<span class="p">)</span>
|
||||
|
||||
<span class="c1"># Place a point light in front of the person. </span>
|
||||
<span class="n">lights</span> <span class="o">=</span> <span class="n">PointLights</span><span class="p">(</span><span class="n">device</span><span class="o">=</span><span class="n">device</span><span class="p">,</span> <span class="n">location</span><span class="o">=</span><span class="p">[[</span><span class="mf">0.0</span><span class="p">,</span> <span class="mf">0.0</span><span class="p">,</span> <span class="mf">2.0</span><span class="p">]])</span>
|
||||
|
||||
<span class="c1"># Create a phong renderer by composing a rasterizer and a shader. The textured phong shader will </span>
|
||||
<span class="c1"># interpolate the texture uv coordinates for each vertex, sample from a texture image and </span>
|
||||
<span class="c1"># apply the Phong lighting model</span>
|
||||
<span class="n">renderer</span> <span class="o">=</span> <span class="n">MeshRenderer</span><span class="p">(</span>
|
||||
<span class="n">rasterizer</span><span class="o">=</span><span class="n">MeshRasterizer</span><span class="p">(</span>
|
||||
<span class="n">cameras</span><span class="o">=</span><span class="n">cameras</span><span class="p">,</span>
|
||||
<span class="n">raster_settings</span><span class="o">=</span><span class="n">raster_settings</span>
|
||||
<span class="p">),</span>
|
||||
<span class="n">shader</span><span class="o">=</span><span class="n">SoftPhongShader</span><span class="p">(</span>
|
||||
<span class="n">device</span><span class="o">=</span><span class="n">device</span><span class="p">,</span>
|
||||
<span class="n">cameras</span><span class="o">=</span><span class="n">cameras</span><span class="p">,</span>
|
||||
<span class="n">lights</span><span class="o">=</span><span class="n">lights</span>
|
||||
<span class="p">)</span>
|
||||
<span class="p">)</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
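<div class="cell border-box-sizing text_cell rendered"><div class="inner_cell"><div class="text_cell_render border-box-sizing rendered_html">
<p>The rasterizer and shader can also be used separately. For example, running only the rasterizer returns a Fragments object whose <code>zbuf</code> field holds per-pixel depth, which is handy for checking camera placement. A sketch (not part of the original tutorial), reusing <code>mesh</code> and <code>cameras</code> from above:</p>
<pre><code># sketch: rasterize only, then look at the depth buffer
fragments = renderer.rasterizer(mesh, cameras=cameras)
depth = fragments.zbuf[0, ..., 0]   # (512, 512); background pixels are -1
plt.imshow(depth.cpu().numpy())
plt.axis("off");
</code></pre>
</div></div></div>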
|
||||
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
|
||||
</div><div class="inner_cell">
|
||||
<div class="text_cell_render border-box-sizing rendered_html">
|
||||
<p>Render the textured mesh we created from the SMPL model and texture map.</p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing code_cell rendered">
|
||||
<div class="input">
|
||||
<div class="prompt input_prompt">In [ ]:</div>
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="n">images</span> <span class="o">=</span> <span class="n">renderer</span><span class="p">(</span><span class="n">mesh</span><span class="p">)</span>
|
||||
<span class="n">plt</span><span class="o">.</span><span class="n">figure</span><span class="p">(</span><span class="n">figsize</span><span class="o">=</span><span class="p">(</span><span class="mi">10</span><span class="p">,</span> <span class="mi">10</span><span class="p">))</span>
|
||||
<span class="n">plt</span><span class="o">.</span><span class="n">imshow</span><span class="p">(</span><span class="n">images</span><span class="p">[</span><span class="mi">0</span><span class="p">,</span> <span class="o">...</span><span class="p">,</span> <span class="p">:</span><span class="mi">3</span><span class="p">]</span><span class="o">.</span><span class="n">cpu</span><span class="p">()</span><span class="o">.</span><span class="n">numpy</span><span class="p">())</span>
|
||||
<span class="n">plt</span><span class="o">.</span><span class="n">grid</span><span class="p">(</span><span class="s2">"off"</span><span class="p">);</span>
|
||||
<span class="n">plt</span><span class="o">.</span><span class="n">axis</span><span class="p">(</span><span class="s2">"off"</span><span class="p">);</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
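<div class="cell border-box-sizing text_cell rendered"><div class="inner_cell"><div class="text_cell_render border-box-sizing rendered_html">
<p>Note that the renderer returns an RGBA image batch of shape (N, image_size, image_size, 4); the <code>[0, ..., :3]</code> slice above simply selects the first image and drops the alpha channel before display:</p>
<pre><code># sketch: the renderer output is RGBA
print(images.shape)        # torch.Size([1, 512, 512, 4])
rgb = images[0, ..., :3]   # the RGB channels shown above
</code></pre>
</div></div></div>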
|
||||
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
|
||||
</div><div class="inner_cell">
|
||||
<div class="text_cell_render border-box-sizing rendered_html">
|
||||
<h3 id="Different-view-and-lighting-of-the-body">Different view and lighting of the body<a class="anchor-link" href="#Different-view-and-lighting-of-the-body">¶</a></h3><p>We can also change many other settings in the rendering pipeline. Here we:</p>
|
||||
<ul>
|
||||
<li>change the <strong>viewing angle</strong> of the camera</li>
|
||||
<li>change the <strong>position</strong> of the point light</li>
|
||||
</ul>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing code_cell rendered">
|
||||
<div class="input">
|
||||
<div class="prompt input_prompt">In [ ]:</div>
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="c1"># Rotate the person by increasing the elevation and azimuth angles to view the back of the person from above. </span>
|
||||
<span class="n">R</span><span class="p">,</span> <span class="n">T</span> <span class="o">=</span> <span class="n">look_at_view_transform</span><span class="p">(</span><span class="mf">2.7</span><span class="p">,</span> <span class="mi">10</span><span class="p">,</span> <span class="mi">180</span><span class="p">)</span>
|
||||
<span class="n">cameras</span> <span class="o">=</span> <span class="n">FoVPerspectiveCameras</span><span class="p">(</span><span class="n">device</span><span class="o">=</span><span class="n">device</span><span class="p">,</span> <span class="n">R</span><span class="o">=</span><span class="n">R</span><span class="p">,</span> <span class="n">T</span><span class="o">=</span><span class="n">T</span><span class="p">)</span>
|
||||
|
||||
<span class="c1"># Move the light location so the light is shining on the person's back. </span>
|
||||
<span class="n">lights</span><span class="o">.</span><span class="n">location</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">tensor</span><span class="p">([[</span><span class="mf">2.0</span><span class="p">,</span> <span class="mf">2.0</span><span class="p">,</span> <span class="o">-</span><span class="mf">2.0</span><span class="p">]],</span> <span class="n">device</span><span class="o">=</span><span class="n">device</span><span class="p">)</span>
|
||||
|
||||
<span class="c1"># Re render the mesh, passing in keyword arguments for the modified components.</span>
|
||||
<span class="n">images</span> <span class="o">=</span> <span class="n">renderer</span><span class="p">(</span><span class="n">mesh</span><span class="p">,</span> <span class="n">lights</span><span class="o">=</span><span class="n">lights</span><span class="p">,</span> <span class="n">cameras</span><span class="o">=</span><span class="n">cameras</span><span class="p">)</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing code_cell rendered">
|
||||
<div class="input">
|
||||
<div class="prompt input_prompt">In [ ]:</div>
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="n">plt</span><span class="o">.</span><span class="n">figure</span><span class="p">(</span><span class="n">figsize</span><span class="o">=</span><span class="p">(</span><span class="mi">10</span><span class="p">,</span> <span class="mi">10</span><span class="p">))</span>
|
||||
<span class="n">plt</span><span class="o">.</span><span class="n">imshow</span><span class="p">(</span><span class="n">images</span><span class="p">[</span><span class="mi">0</span><span class="p">,</span> <span class="o">...</span><span class="p">,</span> <span class="p">:</span><span class="mi">3</span><span class="p">]</span><span class="o">.</span><span class="n">cpu</span><span class="p">()</span><span class="o">.</span><span class="n">numpy</span><span class="p">())</span>
|
||||
<span class="n">plt</span><span class="o">.</span><span class="n">grid</span><span class="p">(</span><span class="s2">"off"</span><span class="p">);</span>
|
||||
<span class="n">plt</span><span class="o">.</span><span class="n">axis</span><span class="p">(</span><span class="s2">"off"</span><span class="p">);</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
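<div class="cell border-box-sizing text_cell rendered"><div class="inner_cell"><div class="text_cell_render border-box-sizing rendered_html">
<p>As a further (hedged) example, the same renderer can produce several viewpoints in one batched call by stacking camera poses and extending the mesh. This is a sketch built on the objects defined above, not part of the original tutorial:</p>
<pre><code># sketch: render 4 azimuth angles in a single batched pass
batch_size = 4
meshes = mesh.extend(batch_size)                       # repeat the mesh batch_size times
azim = torch.linspace(-180, 180, batch_size)
R, T = look_at_view_transform(dist=2.7, elev=10, azim=azim)
cameras = FoVPerspectiveCameras(device=device, R=R, T=T)
images = renderer(meshes, cameras=cameras, lights=lights)  # (4, 512, 512, 4)
</code></pre>
</div></div></div>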
|
||||
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
|
||||
</div><div class="inner_cell">
|
||||
<div class="text_cell_render border-box-sizing rendered_html">
|
||||
<h2 id="Conclusion">Conclusion<a class="anchor-link" href="#Conclusion">¶</a></h2><p>In this tutorial, we've learned how to construct a <strong>textured mesh</strong> from <strong>DensePose model and uv data</strong>, as well as initialize a <strong>Renderer</strong> and change the viewing angle and lighting of our rendered mesh.</p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div></div></div></div></div><footer class="nav-footer" id="footer"><section class="sitemap"><div class="footerSection"><div class="social"><a class="github-button" href="https://github.com/facebookresearch/pytorch3d" data-count-href="https://github.com/facebookresearch/pytorch3d/stargazers" data-show-count="true" data-count-aria-label="# stargazers on GitHub" aria-label="Star PyTorch3D on GitHub">pytorch3d</a></div></div></section><a href="https://opensource.facebook.com/" target="_blank" rel="noreferrer noopener" class="fbOpenSource"><img src="/img/oss_logo.png" alt="Facebook Open Source" width="170" height="45"/></a><section class="copyright">Copyright © 2020 Facebook Inc<br/>Legal:<a href="https://opensource.facebook.com/legal/privacy/" target="_blank" rel="noreferrer noopener">Privacy</a><a href="https://opensource.facebook.com/legal/terms/" target="_blank" rel="noreferrer noopener">Terms</a></section></footer></div></body></html>
|
||||
@@ -6,7 +6,7 @@
|
||||
|
||||
ga('create', 'UA-157376881-1', 'auto');
|
||||
ga('send', 'pageview');
|
||||
</script><script type="text/javascript" src="https://buttons.github.io/buttons.js"></script><script src="/js/scrollSpy.js"></script><link rel="stylesheet" href="/css/main.css"/><script src="/js/codetabs.js"></script></head><body><div class="fixedHeaderContainer"><div class="headerWrapper wrapper"><header><a href="/"><img class="logo" src="/img/pytorch3dfavicon.png" alt="PyTorch3D"/><h2 class="headerTitleWithLogo">PyTorch3D</h2></a><div class="navigationWrapper navigationSlider"><nav class="slidingNav"><ul class="nav-site nav-site-internal"><li class=""><a href="/docs/why_pytorch3d" target="_self">Docs</a></li><li class=""><a href="/tutorials" target="_self">Tutorials</a></li><li class=""><a href="https://pytorch3d.readthedocs.io/" target="_self">API</a></li><li class=""><a href="https://github.com/facebookresearch/pytorch3d" target="_self">GitHub</a></li></ul></nav></div></header></div></div><div class="navPusher"><div class="docMainWrapper wrapper"><div class="container docsNavContainer" id="docsNav"><nav class="toc"><div class="toggleNav"><section class="navWrapper wrapper"><div class="navBreadcrumb wrapper"><div class="navToggle" id="navToggler"><div class="hamburger-menu"><div class="line1"></div><div class="line2"></div><div class="line3"></div></div></div><h2><i>›</i><span></span></h2><div class="tocToggler" id="tocToggler"><i class="icon-toc"></i></div></div><div class="navGroups"><div class="navGroup"><h3 class="navGroupCategoryTitle">Tutorials</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/">Overview</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">3D operators</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/deform_source_mesh_to_target_mesh">Fit Mesh</a></li><li class="navListItem"><a class="navItem" href="/tutorials/bundle_adjustment">Bundle Adjustment</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Rendering</h3><ul class=""><li class="navListItem navListItemActive"><a class="navItem" href="/tutorials/render_textured_meshes">Render Textured Meshes</a></li><li class="navListItem"><a class="navItem" href="/tutorials/fit_textured_mesh">Fit a mesh with texture via rendering</a></li><li class="navListItem"><a class="navItem" href="/tutorials/camera_position_optimization_with_differentiable_rendering">Camera Position Optimization</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Dataloaders</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/dataloaders_ShapeNetCore_R2N2">Data loaders for ShapeNetCore and R2N2</a></li></ul></div></div></section></div><script>
|
||||
</script><script type="text/javascript" src="https://buttons.github.io/buttons.js"></script><script src="/js/scrollSpy.js"></script><link rel="stylesheet" href="/css/main.css"/><script src="/js/codetabs.js"></script></head><body><div class="fixedHeaderContainer"><div class="headerWrapper wrapper"><header><a href="/"><img class="logo" src="/img/pytorch3dfavicon.png" alt="PyTorch3D"/><h2 class="headerTitleWithLogo">PyTorch3D</h2></a><div class="navigationWrapper navigationSlider"><nav class="slidingNav"><ul class="nav-site nav-site-internal"><li class=""><a href="/docs/why_pytorch3d" target="_self">Docs</a></li><li class=""><a href="/tutorials" target="_self">Tutorials</a></li><li class=""><a href="https://pytorch3d.readthedocs.io/" target="_self">API</a></li><li class=""><a href="https://github.com/facebookresearch/pytorch3d" target="_self">GitHub</a></li></ul></nav></div></header></div></div><div class="navPusher"><div class="docMainWrapper wrapper"><div class="container docsNavContainer" id="docsNav"><nav class="toc"><div class="toggleNav"><section class="navWrapper wrapper"><div class="navBreadcrumb wrapper"><div class="navToggle" id="navToggler"><div class="hamburger-menu"><div class="line1"></div><div class="line2"></div><div class="line3"></div></div></div><h2><i>›</i><span></span></h2><div class="tocToggler" id="tocToggler"><i class="icon-toc"></i></div></div><div class="navGroups"><div class="navGroup"><h3 class="navGroupCategoryTitle">Tutorials</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/">Overview</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">3D operators</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/deform_source_mesh_to_target_mesh">Fit Mesh</a></li><li class="navListItem"><a class="navItem" href="/tutorials/bundle_adjustment">Bundle Adjustment</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Rendering</h3><ul class=""><li class="navListItem navListItemActive"><a class="navItem" href="/tutorials/render_textured_meshes">Render Textured Meshes</a></li><li class="navListItem"><a class="navItem" href="/tutorials/render_densepose">Render DensePose Meshes</a></li><li class="navListItem"><a class="navItem" href="/tutorials/render_colored_points">Render Colored Pointclouds</a></li><li class="navListItem"><a class="navItem" href="/tutorials/fit_textured_mesh">Fit a Mesh with Texture via Rendering</a></li><li class="navListItem"><a class="navItem" href="/tutorials/camera_position_optimization_with_differentiable_rendering">Camera Position Optimization with Differentiable Rendering</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Dataloaders</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/dataloaders_ShapeNetCore_R2N2">Data loaders for ShapeNetCore and R2N2</a></li></ul></div></div></section></div><script>
|
||||
var coll = document.getElementsByClassName('collapsible');
|
||||
var checkActiveCategory = true;
|
||||
for (var i = 0; i < coll.length; i++) {
|
||||
@@ -115,12 +115,22 @@
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="o">!</span>pip install torch torchvision
|
||||
<span class="kn">import</span> <span class="nn">os</span>
|
||||
<span class="kn">import</span> <span class="nn">sys</span>
|
||||
<span class="kn">import</span> <span class="nn">torch</span>
|
||||
<span class="k">if</span> <span class="n">torch</span><span class="o">.</span><span class="n">__version__</span><span class="o">==</span><span class="s1">'1.6.0+cu101'</span> <span class="ow">and</span> <span class="n">sys</span><span class="o">.</span><span class="n">platform</span><span class="o">.</span><span class="n">startswith</span><span class="p">(</span><span class="s1">'linux'</span><span class="p">):</span>
|
||||
<span class="o">!</span>pip install pytorch3d
|
||||
<span class="k">else</span><span class="p">:</span>
|
||||
<span class="o">!</span>pip install <span class="s1">'git+https://github.com/facebookresearch/pytorch3d.git@stable'</span>
|
||||
<span class="n">need_pytorch3d</span><span class="o">=</span><span class="kc">False</span>
|
||||
<span class="k">try</span><span class="p">:</span>
|
||||
<span class="kn">import</span> <span class="nn">pytorch3d</span>
|
||||
<span class="k">except</span> <span class="n">ModuleNotFoundError</span><span class="p">:</span>
|
||||
<span class="n">need_pytorch3d</span><span class="o">=</span><span class="kc">True</span>
|
||||
<span class="k">if</span> <span class="n">need_pytorch3d</span><span class="p">:</span>
|
||||
<span class="o">!</span>curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz
|
||||
<span class="o">!</span>tar xzf <span class="m">1</span>.10.0.tar.gz
|
||||
<span class="n">os</span><span class="o">.</span><span class="n">environ</span><span class="p">[</span><span class="s2">"CUB_HOME"</span><span class="p">]</span> <span class="o">=</span> <span class="n">os</span><span class="o">.</span><span class="n">getcwd</span><span class="p">()</span> <span class="o">+</span> <span class="s2">"/cub-1.10.0"</span>
|
||||
<span class="o">!</span>pip install <span class="s1">'git+https://github.com/facebookresearch/pytorch3d.git@stable'</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
@@ -141,6 +151,8 @@
|
||||
|
||||
<span class="c1"># Data structures and functions for rendering</span>
|
||||
<span class="kn">from</span> <span class="nn">pytorch3d.structures</span> <span class="k">import</span> <span class="n">Meshes</span>
|
||||
<span class="kn">from</span> <span class="nn">pytorch3d.vis.plotly_vis</span> <span class="k">import</span> <span class="n">AxisArgs</span><span class="p">,</span> <span class="n">plot_batch_individually</span><span class="p">,</span> <span class="n">plot_scene</span>
|
||||
<span class="kn">from</span> <span class="nn">pytorch3d.vis.texture_vis</span> <span class="k">import</span> <span class="n">texturesuv_image_matplotlib</span>
|
||||
<span class="kn">from</span> <span class="nn">pytorch3d.renderer</span> <span class="k">import</span> <span class="p">(</span>
|
||||
<span class="n">look_at_view_transform</span><span class="p">,</span>
|
||||
<span class="n">FoVPerspectiveCameras</span><span class="p">,</span>
|
||||
@@ -151,7 +163,8 @@
|
||||
<span class="n">MeshRenderer</span><span class="p">,</span>
|
||||
<span class="n">MeshRasterizer</span><span class="p">,</span>
|
||||
<span class="n">SoftPhongShader</span><span class="p">,</span>
|
||||
<span class="n">TexturesUV</span>
|
||||
<span class="n">TexturesUV</span><span class="p">,</span>
|
||||
<span class="n">TexturesVertex</span>
|
||||
<span class="p">)</span>
|
||||
|
||||
<span class="c1"># add path for demo utils functions </span>
|
||||
@@ -250,7 +263,6 @@ If running locally, the data is already available at the correct path.</p>
|
||||
|
||||
<span class="c1"># Load obj file</span>
|
||||
<span class="n">mesh</span> <span class="o">=</span> <span class="n">load_objs_as_meshes</span><span class="p">([</span><span class="n">obj_filename</span><span class="p">],</span> <span class="n">device</span><span class="o">=</span><span class="n">device</span><span class="p">)</span>
|
||||
<span class="n">texture_image</span><span class="o">=</span><span class="n">mesh</span><span class="o">.</span><span class="n">textures</span><span class="o">.</span><span class="n">maps_padded</span><span class="p">()</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
@@ -269,9 +281,31 @@ If running locally, the data is already available at the correct path.</p>
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="n">plt</span><span class="o">.</span><span class="n">figure</span><span class="p">(</span><span class="n">figsize</span><span class="o">=</span><span class="p">(</span><span class="mi">7</span><span class="p">,</span><span class="mi">7</span><span class="p">))</span>
|
||||
<span class="n">texture_image</span><span class="o">=</span><span class="n">mesh</span><span class="o">.</span><span class="n">textures</span><span class="o">.</span><span class="n">maps_padded</span><span class="p">()</span>
|
||||
<span class="n">plt</span><span class="o">.</span><span class="n">imshow</span><span class="p">(</span><span class="n">texture_image</span><span class="o">.</span><span class="n">squeeze</span><span class="p">()</span><span class="o">.</span><span class="n">cpu</span><span class="p">()</span><span class="o">.</span><span class="n">numpy</span><span class="p">())</span>
|
||||
<span class="n">plt</span><span class="o">.</span><span class="n">grid</span><span class="p">(</span><span class="s2">"off"</span><span class="p">);</span>
|
||||
<span class="n">plt</span><span class="o">.</span><span class="n">axis</span><span class="p">(</span><span class="s1">'off'</span><span class="p">);</span>
|
||||
<span class="n">plt</span><span class="o">.</span><span class="n">axis</span><span class="p">(</span><span class="s2">"off"</span><span class="p">);</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
|
||||
</div><div class="inner_cell">
|
||||
<div class="text_cell_render border-box-sizing rendered_html">
|
||||
<p>PyTorch3D has a built-in way to view the texture map with matplotlib along with the points on the map corresponding to vertices. There is also a method, texturesuv_image_PIL, to get a similar image which can be saved to a file.</p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing code_cell rendered">
|
||||
<div class="input">
|
||||
<div class="prompt input_prompt">In [ ]:</div>
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="n">plt</span><span class="o">.</span><span class="n">figure</span><span class="p">(</span><span class="n">figsize</span><span class="o">=</span><span class="p">(</span><span class="mi">7</span><span class="p">,</span><span class="mi">7</span><span class="p">))</span>
|
||||
<span class="n">texturesuv_image_matplotlib</span><span class="p">(</span><span class="n">mesh</span><span class="o">.</span><span class="n">textures</span><span class="p">,</span> <span class="n">subsample</span><span class="o">=</span><span class="kc">None</span><span class="p">)</span>
|
||||
<span class="n">plt</span><span class="o">.</span><span class="n">grid</span><span class="p">(</span><span class="s2">"off"</span><span class="p">);</span>
|
||||
<span class="n">plt</span><span class="o">.</span><span class="n">axis</span><span class="p">(</span><span class="s2">"off"</span><span class="p">);</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
@@ -513,7 +547,201 @@ The renderer and associated components can take batched inputs and <strong>rende
|
||||
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
|
||||
</div><div class="inner_cell">
|
||||
<div class="text_cell_render border-box-sizing rendered_html">
|
||||
<h2 id="7.-Conclusion">7. Conclusion<a class="anchor-link" href="#7.-Conclusion">¶</a></h2><p>In this tutorial we learnt how to <strong>load</strong> a textured mesh from an obj file, initialize a PyTorch3D datastructure called <strong>Meshes</strong>, set up an <strong>Renderer</strong> consisting of a <strong>Rasterizer</strong> and a <strong>Shader</strong>, and modify several components of the rendering pipeline.</p>
|
||||
<h2 id="7.-Plotly-visualization">7. Plotly visualization<a class="anchor-link" href="#7.-Plotly-visualization">¶</a></h2><p>If you only want to visualize a mesh, you don't really need to use a differentiable renderer - instead we support plotting of Meshes with plotly. For these Meshes, we use TexturesVertex to define a texture for the rendering.
|
||||
<code>plot_scene</code> creates a Plotly figure with a trace for each Meshes object in the scene dictionary we pass it.</p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing code_cell rendered">
|
||||
<div class="input">
|
||||
<div class="prompt input_prompt">In [ ]:</div>
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="n">verts</span><span class="p">,</span> <span class="n">faces_idx</span><span class="p">,</span> <span class="n">_</span> <span class="o">=</span> <span class="n">load_obj</span><span class="p">(</span><span class="n">obj_filename</span><span class="p">)</span>
|
||||
<span class="n">faces</span> <span class="o">=</span> <span class="n">faces_idx</span><span class="o">.</span><span class="n">verts_idx</span>
|
||||
|
||||
<span class="c1"># Initialize each vertex to be white in color.</span>
|
||||
<span class="n">verts_rgb</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">ones_like</span><span class="p">(</span><span class="n">verts</span><span class="p">)[</span><span class="kc">None</span><span class="p">]</span> <span class="c1"># (1, V, 3)</span>
|
||||
<span class="n">textures</span> <span class="o">=</span> <span class="n">TexturesVertex</span><span class="p">(</span><span class="n">verts_features</span><span class="o">=</span><span class="n">verts_rgb</span><span class="o">.</span><span class="n">to</span><span class="p">(</span><span class="n">device</span><span class="p">))</span>
|
||||
|
||||
<span class="c1"># Create a Meshes object</span>
|
||||
<span class="n">mesh</span> <span class="o">=</span> <span class="n">Meshes</span><span class="p">(</span>
|
||||
<span class="n">verts</span><span class="o">=</span><span class="p">[</span><span class="n">verts</span><span class="o">.</span><span class="n">to</span><span class="p">(</span><span class="n">device</span><span class="p">)],</span>
|
||||
<span class="n">faces</span><span class="o">=</span><span class="p">[</span><span class="n">faces</span><span class="o">.</span><span class="n">to</span><span class="p">(</span><span class="n">device</span><span class="p">)],</span>
|
||||
<span class="n">textures</span><span class="o">=</span><span class="n">textures</span>
|
||||
<span class="p">)</span>
|
||||
|
||||
<span class="c1"># Render the plotly figure</span>
|
||||
<span class="n">fig</span> <span class="o">=</span> <span class="n">plot_scene</span><span class="p">({</span>
|
||||
<span class="s2">"subplot1"</span><span class="p">:</span> <span class="p">{</span>
|
||||
<span class="s2">"cow_mesh"</span><span class="p">:</span> <span class="n">mesh</span>
|
||||
<span class="p">}</span>
|
||||
<span class="p">})</span>
|
||||
<span class="n">fig</span><span class="o">.</span><span class="n">show</span><span class="p">()</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing code_cell rendered">
|
||||
<div class="input">
|
||||
<div class="prompt input_prompt">In [ ]:</div>
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="c1"># use Plotly's default colors (no texture)</span>
|
||||
<span class="n">mesh</span> <span class="o">=</span> <span class="n">Meshes</span><span class="p">(</span>
|
||||
<span class="n">verts</span><span class="o">=</span><span class="p">[</span><span class="n">verts</span><span class="o">.</span><span class="n">to</span><span class="p">(</span><span class="n">device</span><span class="p">)],</span>
|
||||
<span class="n">faces</span><span class="o">=</span><span class="p">[</span><span class="n">faces</span><span class="o">.</span><span class="n">to</span><span class="p">(</span><span class="n">device</span><span class="p">)]</span>
|
||||
<span class="p">)</span>
|
||||
|
||||
<span class="c1"># Render the plotly figure</span>
|
||||
<span class="n">fig</span> <span class="o">=</span> <span class="n">plot_scene</span><span class="p">({</span>
|
||||
<span class="s2">"subplot1"</span><span class="p">:</span> <span class="p">{</span>
|
||||
<span class="s2">"cow_mesh"</span><span class="p">:</span> <span class="n">mesh</span>
|
||||
<span class="p">}</span>
|
||||
<span class="p">})</span>
|
||||
<span class="n">fig</span><span class="o">.</span><span class="n">show</span><span class="p">()</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing code_cell rendered">
|
||||
<div class="input">
|
||||
<div class="prompt input_prompt">In [ ]:</div>
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="c1"># create a batch of meshes, and offset one to prevent overlap</span>
|
||||
<span class="n">mesh_batch</span> <span class="o">=</span> <span class="n">Meshes</span><span class="p">(</span>
|
||||
<span class="n">verts</span><span class="o">=</span><span class="p">[</span><span class="n">verts</span><span class="o">.</span><span class="n">to</span><span class="p">(</span><span class="n">device</span><span class="p">),</span> <span class="p">(</span><span class="n">verts</span> <span class="o">+</span> <span class="mi">2</span><span class="p">)</span><span class="o">.</span><span class="n">to</span><span class="p">(</span><span class="n">device</span><span class="p">)],</span>
|
||||
<span class="n">faces</span><span class="o">=</span><span class="p">[</span><span class="n">faces</span><span class="o">.</span><span class="n">to</span><span class="p">(</span><span class="n">device</span><span class="p">),</span> <span class="n">faces</span><span class="o">.</span><span class="n">to</span><span class="p">(</span><span class="n">device</span><span class="p">)]</span>
|
||||
<span class="p">)</span>
|
||||
|
||||
<span class="c1"># plot mesh batch in the same trace</span>
|
||||
<span class="n">fig</span> <span class="o">=</span> <span class="n">plot_scene</span><span class="p">({</span>
|
||||
<span class="s2">"subplot1"</span><span class="p">:</span> <span class="p">{</span>
|
||||
<span class="s2">"cow_mesh_batch"</span><span class="p">:</span> <span class="n">mesh_batch</span>
|
||||
<span class="p">}</span>
|
||||
<span class="p">})</span>
|
||||
<span class="n">fig</span><span class="o">.</span><span class="n">show</span><span class="p">()</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing code_cell rendered">
|
||||
<div class="input">
|
||||
<div class="prompt input_prompt">In [ ]:</div>
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="c1"># plot batch of meshes in different traces</span>
|
||||
<span class="n">fig</span> <span class="o">=</span> <span class="n">plot_scene</span><span class="p">({</span>
|
||||
<span class="s2">"subplot1"</span><span class="p">:</span> <span class="p">{</span>
|
||||
<span class="s2">"cow_mesh1"</span><span class="p">:</span> <span class="n">mesh_batch</span><span class="p">[</span><span class="mi">0</span><span class="p">],</span>
|
||||
<span class="s2">"cow_mesh2"</span><span class="p">:</span> <span class="n">mesh_batch</span><span class="p">[</span><span class="mi">1</span><span class="p">]</span>
|
||||
<span class="p">}</span>
|
||||
<span class="p">})</span>
|
||||
<span class="n">fig</span><span class="o">.</span><span class="n">show</span><span class="p">()</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing code_cell rendered">
|
||||
<div class="input">
|
||||
<div class="prompt input_prompt">In [ ]:</div>
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="c1"># plot batch of meshes in different subplots</span>
|
||||
<span class="n">fig</span> <span class="o">=</span> <span class="n">plot_scene</span><span class="p">({</span>
|
||||
<span class="s2">"subplot1"</span><span class="p">:</span> <span class="p">{</span>
|
||||
<span class="s2">"cow_mesh1"</span><span class="p">:</span> <span class="n">mesh_batch</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span>
|
||||
<span class="p">},</span>
|
||||
<span class="s2">"subplot2"</span><span class="p">:{</span>
|
||||
<span class="s2">"cow_mesh2"</span><span class="p">:</span> <span class="n">mesh_batch</span><span class="p">[</span><span class="mi">1</span><span class="p">]</span>
|
||||
<span class="p">}</span>
|
||||
<span class="p">})</span>
|
||||
<span class="n">fig</span><span class="o">.</span><span class="n">show</span><span class="p">()</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
|
||||
</div><div class="inner_cell">
|
||||
<div class="text_cell_render border-box-sizing rendered_html">
|
||||
<p>For batches, we can also use <code>plot_batch_individually</code> to avoid constructing the scene dictionary ourselves.</p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing code_cell rendered">
|
||||
<div class="input">
|
||||
<div class="prompt input_prompt">In [ ]:</div>
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="c1"># extend the batch to have 4 meshes</span>
|
||||
<span class="n">mesh_4</span> <span class="o">=</span> <span class="n">mesh_batch</span><span class="o">.</span><span class="n">extend</span><span class="p">(</span><span class="mi">2</span><span class="p">)</span>
|
||||
|
||||
<span class="c1"># visualize the batch in different subplots, 2 per row</span>
|
||||
<span class="n">fig</span> <span class="o">=</span> <span class="n">plot_batch_individually</span><span class="p">(</span><span class="n">mesh_4</span><span class="p">)</span>
|
||||
<span class="c1"># we can update the figure height and width</span>
|
||||
<span class="n">fig</span><span class="o">.</span><span class="n">update_layout</span><span class="p">(</span><span class="n">height</span><span class="o">=</span><span class="mi">1000</span><span class="p">,</span> <span class="n">width</span><span class="o">=</span><span class="mi">500</span><span class="p">)</span>
|
||||
<span class="n">fig</span><span class="o">.</span><span class="n">show</span><span class="p">()</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
|
||||
</div><div class="inner_cell">
|
||||
<div class="text_cell_render border-box-sizing rendered_html">
|
||||
<p>We can also modify the axis arguments and axis backgrounds in both functions.</p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing code_cell rendered">
|
||||
<div class="input">
|
||||
<div class="prompt input_prompt">In [ ]:</div>
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="n">fig2</span> <span class="o">=</span> <span class="n">plot_scene</span><span class="p">({</span>
|
||||
<span class="s2">"cow_plot1"</span><span class="p">:</span> <span class="p">{</span>
|
||||
<span class="s2">"cows"</span><span class="p">:</span> <span class="n">mesh_batch</span>
|
||||
<span class="p">}</span>
|
||||
<span class="p">},</span>
|
||||
<span class="n">xaxis</span><span class="o">=</span><span class="p">{</span><span class="s2">"backgroundcolor"</span><span class="p">:</span><span class="s2">"rgb(200, 200, 230)"</span><span class="p">},</span>
|
||||
<span class="n">yaxis</span><span class="o">=</span><span class="p">{</span><span class="s2">"backgroundcolor"</span><span class="p">:</span><span class="s2">"rgb(230, 200, 200)"</span><span class="p">},</span>
|
||||
<span class="n">zaxis</span><span class="o">=</span><span class="p">{</span><span class="s2">"backgroundcolor"</span><span class="p">:</span><span class="s2">"rgb(200, 230, 200)"</span><span class="p">},</span>
|
||||
<span class="n">axis_args</span><span class="o">=</span><span class="n">AxisArgs</span><span class="p">(</span><span class="n">showgrid</span><span class="o">=</span><span class="kc">True</span><span class="p">))</span>
|
||||
<span class="n">fig2</span><span class="o">.</span><span class="n">show</span><span class="p">()</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing code_cell rendered">
|
||||
<div class="input">
|
||||
<div class="prompt input_prompt">In [ ]:</div>
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="n">fig3</span> <span class="o">=</span> <span class="n">plot_batch_individually</span><span class="p">(</span>
|
||||
<span class="n">mesh_4</span><span class="p">,</span>
|
||||
<span class="n">ncols</span><span class="o">=</span><span class="mi">2</span><span class="p">,</span>
|
||||
<span class="n">subplot_titles</span> <span class="o">=</span> <span class="p">[</span><span class="s2">"cow1"</span><span class="p">,</span> <span class="s2">"cow2"</span><span class="p">,</span> <span class="s2">"cow3"</span><span class="p">,</span> <span class="s2">"cow4"</span><span class="p">],</span> <span class="c1"># customize subplot titles</span>
|
||||
<span class="n">xaxis</span><span class="o">=</span><span class="p">{</span><span class="s2">"backgroundcolor"</span><span class="p">:</span><span class="s2">"rgb(200, 200, 230)"</span><span class="p">},</span>
|
||||
<span class="n">yaxis</span><span class="o">=</span><span class="p">{</span><span class="s2">"backgroundcolor"</span><span class="p">:</span><span class="s2">"rgb(230, 200, 200)"</span><span class="p">},</span>
|
||||
<span class="n">zaxis</span><span class="o">=</span><span class="p">{</span><span class="s2">"backgroundcolor"</span><span class="p">:</span><span class="s2">"rgb(200, 230, 200)"</span><span class="p">},</span>
|
||||
<span class="n">axis_args</span><span class="o">=</span><span class="n">AxisArgs</span><span class="p">(</span><span class="n">showgrid</span><span class="o">=</span><span class="kc">True</span><span class="p">))</span>
|
||||
<span class="n">fig3</span><span class="o">.</span><span class="n">show</span><span class="p">()</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
|
||||
</div><div class="inner_cell">
|
||||
<div class="text_cell_render border-box-sizing rendered_html">
|
||||
<h2 id="8.-Conclusion">8. Conclusion<a class="anchor-link" href="#8.-Conclusion">¶</a></h2><p>In this tutorial we learnt how to <strong>load</strong> a textured mesh from an obj file, initialize a PyTorch3D datastructure called <strong>Meshes</strong>, set up an <strong>Renderer</strong> consisting of a <strong>Rasterizer</strong> and a <strong>Shader</strong>, and modify several components of the rendering pipeline. We also learned how to render Meshes in Plotly figures.</p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
@@ -6,7 +6,7 @@
|
||||
|
||||
ga('create', 'UA-157376881-1', 'auto');
|
||||
ga('send', 'pageview');
|
||||
</script><script type="text/javascript" src="https://buttons.github.io/buttons.js"></script><script src="/js/scrollSpy.js"></script><link rel="stylesheet" href="/css/main.css"/><script src="/js/codetabs.js"></script></head><body><div class="fixedHeaderContainer"><div class="headerWrapper wrapper"><header><a href="/"><img class="logo" src="/img/pytorch3dfavicon.png" alt="PyTorch3D"/><h2 class="headerTitleWithLogo">PyTorch3D</h2></a><div class="navigationWrapper navigationSlider"><nav class="slidingNav"><ul class="nav-site nav-site-internal"><li class=""><a href="/docs/why_pytorch3d" target="_self">Docs</a></li><li class=""><a href="/tutorials" target="_self">Tutorials</a></li><li class=""><a href="https://pytorch3d.readthedocs.io/" target="_self">API</a></li><li class=""><a href="https://github.com/facebookresearch/pytorch3d" target="_self">GitHub</a></li></ul></nav></div></header></div></div><div class="navPusher"><div class="docMainWrapper wrapper"><div class="container docsNavContainer" id="docsNav"><nav class="toc"><div class="toggleNav"><section class="navWrapper wrapper"><div class="navBreadcrumb wrapper"><div class="navToggle" id="navToggler"><div class="hamburger-menu"><div class="line1"></div><div class="line2"></div><div class="line3"></div></div></div><h2><i>›</i><span></span></h2><div class="tocToggler" id="tocToggler"><i class="icon-toc"></i></div></div><div class="navGroups"><div class="navGroup"><h3 class="navGroupCategoryTitle">Tutorials</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/">Overview</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">3D operators</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/deform_source_mesh_to_target_mesh">Fit Mesh</a></li><li class="navListItem"><a class="navItem" href="/tutorials/bundle_adjustment">Bundle Adjustment</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Rendering</h3><ul class=""><li class="navListItem navListItemActive"><a class="navItem" href="/tutorials/render_textured_meshes">Render Textured Meshes</a></li><li class="navListItem"><a class="navItem" href="/tutorials/fit_textured_mesh">Fit a mesh with texture via rendering</a></li><li class="navListItem"><a class="navItem" href="/tutorials/camera_position_optimization_with_differentiable_rendering">Camera Position Optimization</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Dataloaders</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/dataloaders_ShapeNetCore_R2N2">Data loaders for ShapeNetCore and R2N2</a></li></ul></div></div></section></div><script>
|
||||
</script><script type="text/javascript" src="https://buttons.github.io/buttons.js"></script><script src="/js/scrollSpy.js"></script><link rel="stylesheet" href="/css/main.css"/><script src="/js/codetabs.js"></script></head><body><div class="fixedHeaderContainer"><div class="headerWrapper wrapper"><header><a href="/"><img class="logo" src="/img/pytorch3dfavicon.png" alt="PyTorch3D"/><h2 class="headerTitleWithLogo">PyTorch3D</h2></a><div class="navigationWrapper navigationSlider"><nav class="slidingNav"><ul class="nav-site nav-site-internal"><li class=""><a href="/docs/why_pytorch3d" target="_self">Docs</a></li><li class=""><a href="/tutorials" target="_self">Tutorials</a></li><li class=""><a href="https://pytorch3d.readthedocs.io/" target="_self">API</a></li><li class=""><a href="https://github.com/facebookresearch/pytorch3d" target="_self">GitHub</a></li></ul></nav></div></header></div></div><div class="navPusher"><div class="docMainWrapper wrapper"><div class="container docsNavContainer" id="docsNav"><nav class="toc"><div class="toggleNav"><section class="navWrapper wrapper"><div class="navBreadcrumb wrapper"><div class="navToggle" id="navToggler"><div class="hamburger-menu"><div class="line1"></div><div class="line2"></div><div class="line3"></div></div></div><h2><i>›</i><span></span></h2><div class="tocToggler" id="tocToggler"><i class="icon-toc"></i></div></div><div class="navGroups"><div class="navGroup"><h3 class="navGroupCategoryTitle">Tutorials</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/">Overview</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">3D operators</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/deform_source_mesh_to_target_mesh">Fit Mesh</a></li><li class="navListItem"><a class="navItem" href="/tutorials/bundle_adjustment">Bundle Adjustment</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Rendering</h3><ul class=""><li class="navListItem navListItemActive"><a class="navItem" href="/tutorials/render_textured_meshes">Render Textured Meshes</a></li><li class="navListItem"><a class="navItem" href="/tutorials/render_densepose">Render DensePose Meshes</a></li><li class="navListItem"><a class="navItem" href="/tutorials/render_colored_points">Render Colored Pointclouds</a></li><li class="navListItem"><a class="navItem" href="/tutorials/fit_textured_mesh">Fit a Mesh with Texture via Rendering</a></li><li class="navListItem"><a class="navItem" href="/tutorials/camera_position_optimization_with_differentiable_rendering">Camera Position Optimization with Differentiable Rendering</a></li></ul></div><div class="navGroup"><h3 class="navGroupCategoryTitle">Dataloaders</h3><ul class=""><li class="navListItem"><a class="navItem" href="/tutorials/dataloaders_ShapeNetCore_R2N2">Data loaders for ShapeNetCore and R2N2</a></li></ul></div></div></section></div><script>
|
||||
var coll = document.getElementsByClassName('collapsible');
|
||||
var checkActiveCategory = true;
|
||||
for (var i = 0; i < coll.length; i++) {
|
||||
@@ -115,12 +115,22 @@
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="o">!</span>pip install torch torchvision
|
||||
<span class="kn">import</span> <span class="nn">os</span>
|
||||
<span class="kn">import</span> <span class="nn">sys</span>
|
||||
<span class="kn">import</span> <span class="nn">torch</span>
|
||||
<span class="k">if</span> <span class="n">torch</span><span class="o">.</span><span class="n">__version__</span><span class="o">==</span><span class="s1">'1.6.0+cu101'</span> <span class="ow">and</span> <span class="n">sys</span><span class="o">.</span><span class="n">platform</span><span class="o">.</span><span class="n">startswith</span><span class="p">(</span><span class="s1">'linux'</span><span class="p">):</span>
|
||||
<span class="o">!</span>pip install pytorch3d
|
||||
<span class="k">else</span><span class="p">:</span>
|
||||
<span class="o">!</span>pip install <span class="s1">'git+https://github.com/facebookresearch/pytorch3d.git@stable'</span>
|
||||
<span class="n">need_pytorch3d</span><span class="o">=</span><span class="kc">False</span>
|
||||
<span class="k">try</span><span class="p">:</span>
|
||||
<span class="kn">import</span> <span class="nn">pytorch3d</span>
|
||||
<span class="k">except</span> <span class="n">ModuleNotFoundError</span><span class="p">:</span>
|
||||
<span class="n">need_pytorch3d</span><span class="o">=</span><span class="kc">True</span>
|
||||
<span class="k">if</span> <span class="n">need_pytorch3d</span><span class="p">:</span>
|
||||
<span class="o">!</span>curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz
|
||||
<span class="o">!</span>tar xzf <span class="m">1</span>.10.0.tar.gz
|
||||
<span class="n">os</span><span class="o">.</span><span class="n">environ</span><span class="p">[</span><span class="s2">"CUB_HOME"</span><span class="p">]</span> <span class="o">=</span> <span class="n">os</span><span class="o">.</span><span class="n">getcwd</span><span class="p">()</span> <span class="o">+</span> <span class="s2">"/cub-1.10.0"</span>
|
||||
<span class="o">!</span>pip install <span class="s1">'git+https://github.com/facebookresearch/pytorch3d.git@stable'</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
@@ -141,6 +151,8 @@
|
||||
|
||||
<span class="c1"># Data structures and functions for rendering</span>
|
||||
<span class="kn">from</span> <span class="nn">pytorch3d.structures</span> <span class="k">import</span> <span class="n">Meshes</span>
|
||||
<span class="kn">from</span> <span class="nn">pytorch3d.vis.plotly_vis</span> <span class="k">import</span> <span class="n">AxisArgs</span><span class="p">,</span> <span class="n">plot_batch_individually</span><span class="p">,</span> <span class="n">plot_scene</span>
|
||||
<span class="kn">from</span> <span class="nn">pytorch3d.vis.texture_vis</span> <span class="k">import</span> <span class="n">texturesuv_image_matplotlib</span>
|
||||
<span class="kn">from</span> <span class="nn">pytorch3d.renderer</span> <span class="k">import</span> <span class="p">(</span>
|
||||
<span class="n">look_at_view_transform</span><span class="p">,</span>
|
||||
<span class="n">FoVPerspectiveCameras</span><span class="p">,</span>
|
||||
@@ -151,7 +163,8 @@
|
||||
<span class="n">MeshRenderer</span><span class="p">,</span>
|
||||
<span class="n">MeshRasterizer</span><span class="p">,</span>
|
||||
<span class="n">SoftPhongShader</span><span class="p">,</span>
|
||||
<span class="n">TexturesUV</span>
|
||||
<span class="n">TexturesUV</span><span class="p">,</span>
|
||||
<span class="n">TexturesVertex</span>
|
||||
<span class="p">)</span>
|
||||
|
||||
<span class="c1"># add path for demo utils functions </span>
|
||||
@@ -250,7 +263,6 @@ If running locally, the data is already available at the correct path.</p>
|
||||
|
||||
<span class="c1"># Load obj file</span>
|
||||
<span class="n">mesh</span> <span class="o">=</span> <span class="n">load_objs_as_meshes</span><span class="p">([</span><span class="n">obj_filename</span><span class="p">],</span> <span class="n">device</span><span class="o">=</span><span class="n">device</span><span class="p">)</span>
|
||||
<span class="n">texture_image</span><span class="o">=</span><span class="n">mesh</span><span class="o">.</span><span class="n">textures</span><span class="o">.</span><span class="n">maps_padded</span><span class="p">()</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
@@ -269,9 +281,31 @@ If running locally, the data is already available at the correct path.</p>
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="n">plt</span><span class="o">.</span><span class="n">figure</span><span class="p">(</span><span class="n">figsize</span><span class="o">=</span><span class="p">(</span><span class="mi">7</span><span class="p">,</span><span class="mi">7</span><span class="p">))</span>
|
||||
<span class="n">texture_image</span><span class="o">=</span><span class="n">mesh</span><span class="o">.</span><span class="n">textures</span><span class="o">.</span><span class="n">maps_padded</span><span class="p">()</span>
|
||||
<span class="n">plt</span><span class="o">.</span><span class="n">imshow</span><span class="p">(</span><span class="n">texture_image</span><span class="o">.</span><span class="n">squeeze</span><span class="p">()</span><span class="o">.</span><span class="n">cpu</span><span class="p">()</span><span class="o">.</span><span class="n">numpy</span><span class="p">())</span>
|
||||
<span class="n">plt</span><span class="o">.</span><span class="n">grid</span><span class="p">(</span><span class="s2">"off"</span><span class="p">);</span>
|
||||
<span class="n">plt</span><span class="o">.</span><span class="n">axis</span><span class="p">(</span><span class="s1">'off'</span><span class="p">);</span>
|
||||
<span class="n">plt</span><span class="o">.</span><span class="n">axis</span><span class="p">(</span><span class="s2">"off"</span><span class="p">);</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
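<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div><div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<p>The tensor returned by <code>maps_padded()</code> above is batched. As a small illustrative check (not part of the original notebook), printing its shape for this single mesh should give <code>(1, H, W, 3)</code>:</p>
<pre><code># maps_padded() returns a batched texture map tensor of shape (N, H, W, 3).
print(texture_image.shape)</code></pre>
</div>
</div>
</div>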
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
|
||||
</div><div class="inner_cell">
|
||||
<div class="text_cell_render border-box-sizing rendered_html">
|
||||
<p>PyTorch3D has a built-in way to view the texture map with matplotlib, along with the points on the map corresponding to vertices. There is also a method, <code>texturesuv_image_PIL</code>, to get a similar image that can be saved to a file.</p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing code_cell rendered">
|
||||
<div class="input">
|
||||
<div class="prompt input_prompt">In [ ]:</div>
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="n">plt</span><span class="o">.</span><span class="n">figure</span><span class="p">(</span><span class="n">figsize</span><span class="o">=</span><span class="p">(</span><span class="mi">7</span><span class="p">,</span><span class="mi">7</span><span class="p">))</span>
|
||||
<span class="n">texturesuv_image_matplotlib</span><span class="p">(</span><span class="n">mesh</span><span class="o">.</span><span class="n">textures</span><span class="p">,</span> <span class="n">subsample</span><span class="o">=</span><span class="kc">None</span><span class="p">)</span>
|
||||
<span class="n">plt</span><span class="o">.</span><span class="n">grid</span><span class="p">(</span><span class="s2">"off"</span><span class="p">);</span>
|
||||
<span class="n">plt</span><span class="o">.</span><span class="n">axis</span><span class="p">(</span><span class="s2">"off"</span><span class="p">);</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
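<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div><div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<p>For <code>texturesuv_image_PIL</code>, a minimal sketch of saving the UV layout to disk might look like the following (the output filename is illustrative, not part of the original notebook):</p>
<pre><code>from pytorch3d.vis.texture_vis import texturesuv_image_PIL

# Build a PIL image of the texture map with the UV points overlaid,
# then save it; "cow_texture_uv.png" is an illustrative filename.
uv_image = texturesuv_image_PIL(mesh.textures, subsample=None)
uv_image.save("cow_texture_uv.png")</code></pre>
</div>
</div>
</div>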
@@ -513,7 +547,201 @@ The renderer and associated components can take batched inputs and <strong>rende
|
||||
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
|
||||
</div><div class="inner_cell">
|
||||
<div class="text_cell_render border-box-sizing rendered_html">
|
||||
<h2 id="7.-Conclusion">7. Conclusion<a class="anchor-link" href="#7.-Conclusion">¶</a></h2><p>In this tutorial we learnt how to <strong>load</strong> a textured mesh from an obj file, initialize a PyTorch3D datastructure called <strong>Meshes</strong>, set up an <strong>Renderer</strong> consisting of a <strong>Rasterizer</strong> and a <strong>Shader</strong>, and modify several components of the rendering pipeline.</p>
|
||||
<h2 id="7.-Plotly-visualization">7. Plotly visualization<a class="anchor-link" href="#7.-Plotly-visualization">¶</a></h2><p>If you only want to visualize a mesh, you don't really need to use a differentiable renderer - instead we support plotting of Meshes with plotly. For these Meshes, we use TexturesVertex to define a texture for the rendering.
|
||||
<code>plot_meshes</code> creates a Plotly figure with a trace for each Meshes object.</p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing code_cell rendered">
|
||||
<div class="input">
|
||||
<div class="prompt input_prompt">In [ ]:</div>
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="n">verts</span><span class="p">,</span> <span class="n">faces_idx</span><span class="p">,</span> <span class="n">_</span> <span class="o">=</span> <span class="n">load_obj</span><span class="p">(</span><span class="n">obj_filename</span><span class="p">)</span>
|
||||
<span class="n">faces</span> <span class="o">=</span> <span class="n">faces_idx</span><span class="o">.</span><span class="n">verts_idx</span>
|
||||
|
||||
<span class="c1"># Initialize each vertex to be white in color.</span>
|
||||
<span class="n">verts_rgb</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">ones_like</span><span class="p">(</span><span class="n">verts</span><span class="p">)[</span><span class="kc">None</span><span class="p">]</span> <span class="c1"># (1, V, 3)</span>
|
||||
<span class="n">textures</span> <span class="o">=</span> <span class="n">TexturesVertex</span><span class="p">(</span><span class="n">verts_features</span><span class="o">=</span><span class="n">verts_rgb</span><span class="o">.</span><span class="n">to</span><span class="p">(</span><span class="n">device</span><span class="p">))</span>
|
||||
|
||||
<span class="c1"># Create a Meshes object</span>
|
||||
<span class="n">mesh</span> <span class="o">=</span> <span class="n">Meshes</span><span class="p">(</span>
|
||||
<span class="n">verts</span><span class="o">=</span><span class="p">[</span><span class="n">verts</span><span class="o">.</span><span class="n">to</span><span class="p">(</span><span class="n">device</span><span class="p">)],</span>
|
||||
<span class="n">faces</span><span class="o">=</span><span class="p">[</span><span class="n">faces</span><span class="o">.</span><span class="n">to</span><span class="p">(</span><span class="n">device</span><span class="p">)],</span>
|
||||
<span class="n">textures</span><span class="o">=</span><span class="n">textures</span>
|
||||
<span class="p">)</span>
|
||||
|
||||
<span class="c1"># Render the plotly figure</span>
|
||||
<span class="n">fig</span> <span class="o">=</span> <span class="n">plot_scene</span><span class="p">({</span>
|
||||
<span class="s2">"subplot1"</span><span class="p">:</span> <span class="p">{</span>
|
||||
<span class="s2">"cow_mesh"</span><span class="p">:</span> <span class="n">mesh</span>
|
||||
<span class="p">}</span>
|
||||
<span class="p">})</span>
|
||||
<span class="n">fig</span><span class="o">.</span><span class="n">show</span><span class="p">()</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
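<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
</div><div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<p>As an aside not covered in the original tutorial, the figure returned by <code>plot_scene</code> is an ordinary Plotly figure object, so it can also be saved as a standalone interactive HTML file:</p>
<pre><code># Save the interactive figure to a standalone HTML file
# ("cow_scene.html" is an illustrative filename).
fig.write_html("cow_scene.html")</code></pre>
</div>
</div>
</div>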
<div class="cell border-box-sizing code_cell rendered">
|
||||
<div class="input">
|
||||
<div class="prompt input_prompt">In [ ]:</div>
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="c1"># use Plotly's default colors (no texture)</span>
|
||||
<span class="n">mesh</span> <span class="o">=</span> <span class="n">Meshes</span><span class="p">(</span>
|
||||
<span class="n">verts</span><span class="o">=</span><span class="p">[</span><span class="n">verts</span><span class="o">.</span><span class="n">to</span><span class="p">(</span><span class="n">device</span><span class="p">)],</span>
|
||||
<span class="n">faces</span><span class="o">=</span><span class="p">[</span><span class="n">faces</span><span class="o">.</span><span class="n">to</span><span class="p">(</span><span class="n">device</span><span class="p">)]</span>
|
||||
<span class="p">)</span>
|
||||
|
||||
<span class="c1"># Render the plotly figure</span>
|
||||
<span class="n">fig</span> <span class="o">=</span> <span class="n">plot_scene</span><span class="p">({</span>
|
||||
<span class="s2">"subplot1"</span><span class="p">:</span> <span class="p">{</span>
|
||||
<span class="s2">"cow_mesh"</span><span class="p">:</span> <span class="n">mesh</span>
|
||||
<span class="p">}</span>
|
||||
<span class="p">})</span>
|
||||
<span class="n">fig</span><span class="o">.</span><span class="n">show</span><span class="p">()</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing code_cell rendered">
|
||||
<div class="input">
|
||||
<div class="prompt input_prompt">In [ ]:</div>
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="c1"># create a batch of meshes, and offset one to prevent overlap</span>
|
||||
<span class="n">mesh_batch</span> <span class="o">=</span> <span class="n">Meshes</span><span class="p">(</span>
|
||||
<span class="n">verts</span><span class="o">=</span><span class="p">[</span><span class="n">verts</span><span class="o">.</span><span class="n">to</span><span class="p">(</span><span class="n">device</span><span class="p">),</span> <span class="p">(</span><span class="n">verts</span> <span class="o">+</span> <span class="mi">2</span><span class="p">)</span><span class="o">.</span><span class="n">to</span><span class="p">(</span><span class="n">device</span><span class="p">)],</span>
|
||||
<span class="n">faces</span><span class="o">=</span><span class="p">[</span><span class="n">faces</span><span class="o">.</span><span class="n">to</span><span class="p">(</span><span class="n">device</span><span class="p">),</span> <span class="n">faces</span><span class="o">.</span><span class="n">to</span><span class="p">(</span><span class="n">device</span><span class="p">)]</span>
|
||||
<span class="p">)</span>
|
||||
|
||||
<span class="c1"># plot mesh batch in the same trace</span>
|
||||
<span class="n">fig</span> <span class="o">=</span> <span class="n">plot_scene</span><span class="p">({</span>
|
||||
<span class="s2">"subplot1"</span><span class="p">:</span> <span class="p">{</span>
|
||||
<span class="s2">"cow_mesh_batch"</span><span class="p">:</span> <span class="n">mesh_batch</span>
|
||||
<span class="p">}</span>
|
||||
<span class="p">})</span>
|
||||
<span class="n">fig</span><span class="o">.</span><span class="n">show</span><span class="p">()</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing code_cell rendered">
|
||||
<div class="input">
|
||||
<div class="prompt input_prompt">In [ ]:</div>
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="c1"># plot batch of meshes in different traces</span>
|
||||
<span class="n">fig</span> <span class="o">=</span> <span class="n">plot_scene</span><span class="p">({</span>
|
||||
<span class="s2">"subplot1"</span><span class="p">:</span> <span class="p">{</span>
|
||||
<span class="s2">"cow_mesh1"</span><span class="p">:</span> <span class="n">mesh_batch</span><span class="p">[</span><span class="mi">0</span><span class="p">],</span>
|
||||
<span class="s2">"cow_mesh2"</span><span class="p">:</span> <span class="n">mesh_batch</span><span class="p">[</span><span class="mi">1</span><span class="p">]</span>
|
||||
<span class="p">}</span>
|
||||
<span class="p">})</span>
|
||||
<span class="n">fig</span><span class="o">.</span><span class="n">show</span><span class="p">()</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing code_cell rendered">
|
||||
<div class="input">
|
||||
<div class="prompt input_prompt">In [ ]:</div>
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="c1"># plot batch of meshes in different subplots</span>
|
||||
<span class="n">fig</span> <span class="o">=</span> <span class="n">plot_scene</span><span class="p">({</span>
|
||||
<span class="s2">"subplot1"</span><span class="p">:</span> <span class="p">{</span>
|
||||
<span class="s2">"cow_mesh1"</span><span class="p">:</span> <span class="n">mesh_batch</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span>
|
||||
<span class="p">},</span>
|
||||
<span class="s2">"subplot2"</span><span class="p">:{</span>
|
||||
<span class="s2">"cow_mesh2"</span><span class="p">:</span> <span class="n">mesh_batch</span><span class="p">[</span><span class="mi">1</span><span class="p">]</span>
|
||||
<span class="p">}</span>
|
||||
<span class="p">})</span>
|
||||
<span class="n">fig</span><span class="o">.</span><span class="n">show</span><span class="p">()</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
|
||||
</div><div class="inner_cell">
|
||||
<div class="text_cell_render border-box-sizing rendered_html">
|
||||
<p>For batches, we can also use <code>plot_batch_individually</code> to avoid constructing the scene dictionary ourselves.</p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing code_cell rendered">
|
||||
<div class="input">
|
||||
<div class="prompt input_prompt">In [ ]:</div>
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="c1"># extend the batch to have 4 meshes</span>
|
||||
<span class="n">mesh_4</span> <span class="o">=</span> <span class="n">mesh_batch</span><span class="o">.</span><span class="n">extend</span><span class="p">(</span><span class="mi">2</span><span class="p">)</span>
|
||||
|
||||
<span class="c1"># visualize the batch in different subplots, 2 per row</span>
|
||||
<span class="n">fig</span> <span class="o">=</span> <span class="n">plot_batch_individually</span><span class="p">(</span><span class="n">mesh_4</span><span class="p">)</span>
|
||||
<span class="c1"># we can update the figure height and width</span>
|
||||
<span class="n">fig</span><span class="o">.</span><span class="n">update_layout</span><span class="p">(</span><span class="n">height</span><span class="o">=</span><span class="mi">1000</span><span class="p">,</span> <span class="n">width</span><span class="o">=</span><span class="mi">500</span><span class="p">)</span>
|
||||
<span class="n">fig</span><span class="o">.</span><span class="n">show</span><span class="p">()</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
|
||||
</div><div class="inner_cell">
|
||||
<div class="text_cell_render border-box-sizing rendered_html">
|
||||
<p>We can also modify the axis arguments and axis backgrounds in both functions.</p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing code_cell rendered">
|
||||
<div class="input">
|
||||
<div class="prompt input_prompt">In [ ]:</div>
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="n">fig2</span> <span class="o">=</span> <span class="n">plot_scene</span><span class="p">({</span>
|
||||
<span class="s2">"cow_plot1"</span><span class="p">:</span> <span class="p">{</span>
|
||||
<span class="s2">"cows"</span><span class="p">:</span> <span class="n">mesh_batch</span>
|
||||
<span class="p">}</span>
|
||||
<span class="p">},</span>
|
||||
<span class="n">xaxis</span><span class="o">=</span><span class="p">{</span><span class="s2">"backgroundcolor"</span><span class="p">:</span><span class="s2">"rgb(200, 200, 230)"</span><span class="p">},</span>
|
||||
<span class="n">yaxis</span><span class="o">=</span><span class="p">{</span><span class="s2">"backgroundcolor"</span><span class="p">:</span><span class="s2">"rgb(230, 200, 200)"</span><span class="p">},</span>
|
||||
<span class="n">zaxis</span><span class="o">=</span><span class="p">{</span><span class="s2">"backgroundcolor"</span><span class="p">:</span><span class="s2">"rgb(200, 230, 200)"</span><span class="p">},</span>
|
||||
<span class="n">axis_args</span><span class="o">=</span><span class="n">AxisArgs</span><span class="p">(</span><span class="n">showgrid</span><span class="o">=</span><span class="kc">True</span><span class="p">))</span>
|
||||
<span class="n">fig2</span><span class="o">.</span><span class="n">show</span><span class="p">()</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing code_cell rendered">
|
||||
<div class="input">
|
||||
<div class="prompt input_prompt">In [ ]:</div>
|
||||
<div class="inner_cell">
|
||||
<div class="input_area">
|
||||
<div class="highlight hl-ipython3"><pre><span></span><span class="n">fig3</span> <span class="o">=</span> <span class="n">plot_batch_individually</span><span class="p">(</span>
|
||||
<span class="n">mesh_4</span><span class="p">,</span>
|
||||
<span class="n">ncols</span><span class="o">=</span><span class="mi">2</span><span class="p">,</span>
|
||||
<span class="n">subplot_titles</span> <span class="o">=</span> <span class="p">[</span><span class="s2">"cow1"</span><span class="p">,</span> <span class="s2">"cow2"</span><span class="p">,</span> <span class="s2">"cow3"</span><span class="p">,</span> <span class="s2">"cow4"</span><span class="p">],</span> <span class="c1"># customize subplot titles</span>
|
||||
<span class="n">xaxis</span><span class="o">=</span><span class="p">{</span><span class="s2">"backgroundcolor"</span><span class="p">:</span><span class="s2">"rgb(200, 200, 230)"</span><span class="p">},</span>
|
||||
<span class="n">yaxis</span><span class="o">=</span><span class="p">{</span><span class="s2">"backgroundcolor"</span><span class="p">:</span><span class="s2">"rgb(230, 200, 200)"</span><span class="p">},</span>
|
||||
<span class="n">zaxis</span><span class="o">=</span><span class="p">{</span><span class="s2">"backgroundcolor"</span><span class="p">:</span><span class="s2">"rgb(200, 230, 200)"</span><span class="p">},</span>
|
||||
<span class="n">axis_args</span><span class="o">=</span><span class="n">AxisArgs</span><span class="p">(</span><span class="n">showgrid</span><span class="o">=</span><span class="kc">True</span><span class="p">))</span>
|
||||
<span class="n">fig3</span><span class="o">.</span><span class="n">show</span><span class="p">()</span>
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="cell border-box-sizing text_cell rendered"><div class="prompt input_prompt">
|
||||
</div><div class="inner_cell">
|
||||
<div class="text_cell_render border-box-sizing rendered_html">
|
||||
<h2 id="8.-Conclusion">8. Conclusion<a class="anchor-link" href="#8.-Conclusion">¶</a></h2><p>In this tutorial we learnt how to <strong>load</strong> a textured mesh from an obj file, initialize a PyTorch3D datastructure called <strong>Meshes</strong>, set up an <strong>Renderer</strong> consisting of a <strong>Rasterizer</strong> and a <strong>Shader</strong>, and modify several components of the rendering pipeline. We also learned how to render Meshes in Plotly figures.</p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||