<h3 id="coordinate-transformation-conventions">Coordinate transformation conventions</h3>
<p>Rendering requires transformations between several different coordinate frames: world space, view/camera space, NDC space and screen space. At each step it is important to know where the camera is located, how the +X, +Y, +Z axes are aligned and the possible range of values. The following figure outlines the conventions used in PyTorch3D.</p>
<p><img src="assets/transforms_overview.jpg" width="1000"></p>
<p>For example, given a teapot mesh, the world coordinate frame, camera coordinate frame and image are shown in the figure below. Note that the world and camera coordinate frames have the +Z direction pointing into the page.</p>
<p><img src="assets/world_camera_image.jpg" width="1000"></p>
<hr>
<p><strong>NOTE: PyTorch3D vs OpenGL</strong></p>
<p><img align="center" src="assets/opengl_coordframes.png" width="300"></p>
<hr>
<h3 id="the-pulsar-backend">The pulsar backend</h3>
<p>Since v0.3, <a href="https://arxiv.org/abs/2004.07484">pulsar</a> can be used as a backend for point-rendering. It has a focus on efficiency, which comes with pros and cons: it is highly optimized and all rendering stages are integrated in the CUDA kernels. This leads to significantly higher speed and better scaling behavior. We use it at Facebook Reality Labs to render and optimize scenes with millions of spheres in resolutions up to 4K. You can find a runtime comparison plot below (settings: <code>bin_size=None</code>, <code>points_per_pixel=5</code>, <code>image_size=1024</code>, <code>radius=1e-2</code>, <code>composite_params.radius=1e-4</code>; benchmarked on an RTX 2070 GPU).</p>
<p><img align="center" src="assets/pulsar_bm.png" width="300"></p>
<p>Pulsar's processing steps are tightly integrated CUDA kernels and do not work with custom <code>rasterizer</code> and <code>compositor</code> components. We provide two ways to use Pulsar: (1) there is a unified interface to match the PyTorch3D calling convention seamlessly. This is, for example, illustrated in the <a href="https://github.com/facebookresearch/pytorch3d/blob/master/docs/tutorials/render_colored_points.ipynb">point cloud tutorial</a>. (2) There is a direct interface available to the pulsar backend, which exposes the full functionality of the backend (including opacity, which is not yet available in PyTorch3D). Examples showing its use as well as the matching PyTorch3D interface code are available in <a href="https://github.com/facebookresearch/pytorch3d/tree/master/docs/examples">this folder</a>.</p>
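<p>A minimal sketch of route (1), assuming the unified <code>PulsarPointsRenderer</code> interface used in the point cloud tutorial; the point cloud, camera placement and raster settings are illustrative, and constructor defaults (e.g. the number of color channels) may vary between versions:</p>
<pre><code class="language-python">
# Illustrative sketch of the unified pulsar interface (assumed API per the point cloud tutorial).
import torch
from pytorch3d.structures import Pointclouds
from pytorch3d.renderer import (
    FoVOrthographicCameras,
    PointsRasterizationSettings,
    PointsRasterizer,
    PulsarPointsRenderer,
    look_at_view_transform,
)

device = torch.device("cuda:0")  # pulsar is a CUDA-focused backend

# A random point cloud with one RGB feature per point.
verts = torch.rand(1, 1000, 3, device=device) - 0.5
rgb = torch.rand(1, 1000, 3, device=device)
point_cloud = Pointclouds(points=verts, features=rgb)

# Standard PyTorch3D camera / rasterization setup.
R, T = look_at_view_transform(dist=2.0, elev=10.0, azim=0.0)
cameras = FoVOrthographicCameras(device=device, R=R, T=T, znear=[1.0])
raster_settings = PointsRasterizationSettings(image_size=256, radius=0.01, points_per_pixel=5)

# The unified interface wraps the pulsar CUDA kernels behind the usual rasterizer object;
# gamma is a pulsar-specific blending parameter, one value per batch element.
renderer = PulsarPointsRenderer(
    rasterizer=PointsRasterizer(cameras=cameras, raster_settings=raster_settings),
).to(device)
images = renderer(point_cloud, gamma=(1e-4,))  # (1, 256, 256, 3)
</code></pre>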
<hr>
<h3 id="texturing-options">Texturing options</h3>
<p>For mesh texturing we offer several options (in <code>pytorch3d/renderer/mesh/textures.py</code>); each is constructed as shown in the sketch after this list:</p>
<ol>
<li><strong>Vertex Textures</strong>: D-dimensional textures for each vertex (for example an RGB color) which can be interpolated across the face. This can be represented as an <code>(N, V, D)</code> tensor. This is a fairly simple representation, but it cannot model complex textures if the mesh faces are large.</li>
<li><strong>UV Textures</strong>: vertex UV coordinates and <strong>one</strong> texture map for the whole mesh. For a point on a face with given barycentric coordinates, the face color can be computed by interpolating the vertex UV coordinates and then sampling from the texture map. This representation requires two tensors (UVs: <code>(N, V, 2)</code>, Texture map: <code>(N, H, W, 3)</code>), and supports only one texture map per mesh.</li>
<li><strong>Face Textures</strong>: In more complex cases such as ShapeNet meshes, there are multiple texture maps per mesh and some faces have texture while others do not. For these cases, a more flexible representation is a texture atlas, where each face is represented as an <code>(RxR)</code> texture map, where R is the texture resolution. For a given point on the face, the texture value can be sampled from the per-face texture map using the barycentric coordinates of the point. This representation requires one tensor of shape <code>(N, F, R, R, 3)</code>. This texturing method is inspired by the SoftRasterizer implementation. For more details refer to the <a href="https://github.com/facebookresearch/pytorch3d/blob/master/pytorch3d/io/mtl_io.py#L123"><code>make_material_atlas</code></a> and <a href="https://github.com/facebookresearch/pytorch3d/blob/master/pytorch3d/renderer/mesh/textures.py#L452"><code>sample_textures</code></a> functions.</li>
</ol>
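<p>A rough construction sketch, assuming the <code>TexturesVertex</code>, <code>TexturesUV</code> and <code>TexturesAtlas</code> classes exported from <code>pytorch3d.renderer</code>; the shapes and random data below are illustrative:</p>
<pre><code class="language-python">
# Illustrative sketch: build each texture type for a batch of N meshes
# with V vertices and F faces; shapes follow the list above.
import torch
from pytorch3d.renderer import TexturesAtlas, TexturesUV, TexturesVertex

N, V, F, H, W, R = 2, 100, 50, 256, 256, 4

# 1. Vertex textures: one D-dimensional (here RGB) feature per vertex.
vertex_tex = TexturesVertex(verts_features=torch.rand(N, V, 3))

# 2. UV textures: per-vertex UV coordinates plus one texture map per mesh.
uv_tex = TexturesUV(
    maps=torch.rand(N, H, W, 3),            # texture maps
    faces_uvs=torch.randint(V, (N, F, 3)),  # per-face indices into verts_uvs
    verts_uvs=torch.rand(N, V, 2),          # one UV coordinate per vertex
)

# 3. Face textures: an (R x R) texture atlas entry per face.
atlas_tex = TexturesAtlas(atlas=torch.rand(N, F, R, R, 3))
</code></pre>
<p>Any of these texture objects can then be passed as the <code>textures</code> argument when constructing a <code>pytorch3d.structures.Meshes</code> object.</p>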
<p><img src="assets/texturing.jpg" width="1000"></p>
<tr><td>SoftSilhouetteShader</td><td style="text-align:center"></td><td style="text-align:center"></td><td style="text-align:center"></td><td style="text-align:center"></td><td style="text-align:center"></td><td style="text-align:center"></td><td style="text-align:center"></td><td style="text-align:center">✔️</td></tr>
</tbody>
</table>
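<p>The (partial) table row above references <code>SoftSilhouetteShader</code>. As a hedged sketch (the camera placement, image size and blend settings below are illustrative assumptions, not values from this page), such a shader plugs into the modular <code>MeshRenderer</code> as follows:</p>
<pre><code class="language-python">
# Illustrative sketch of a silhouette renderer built from the modular pieces.
import math
from pytorch3d.renderer import (
    BlendParams,
    FoVPerspectiveCameras,
    MeshRasterizer,
    MeshRenderer,
    RasterizationSettings,
    SoftSilhouetteShader,
    look_at_view_transform,
)

R, T = look_at_view_transform(dist=2.7, elev=10.0, azim=20.0)
cameras = FoVPerspectiveCameras(R=R, T=T)

# Soft rasterization settings: blur_radius and faces_per_pixel control how soft
# (and therefore how differentiable) the silhouette edges are.
blend_params = BlendParams(sigma=1e-4, gamma=1e-4)
raster_settings = RasterizationSettings(
    image_size=256,
    blur_radius=math.log(1.0 / 1e-4 - 1.0) * blend_params.sigma,
    faces_per_pixel=50,
)

renderer = MeshRenderer(
    rasterizer=MeshRasterizer(cameras=cameras, raster_settings=raster_settings),
    shader=SoftSilhouetteShader(blend_params=blend_params),
)
# silhouettes = renderer(meshes)  # meshes: a pytorch3d.structures.Meshes batch
</code></pre>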